diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000000000..06328af71459c --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,14 @@ +# +# An auto defined `clippy` feature was introduced, +# but it was found to clash with user defined features, +# so was renamed to `cargo-clippy`. +# +# If you want standard clippy run: +# RUSTFLAGS= cargo clippy +[target.'cfg(feature = "cargo-clippy")'] +rustflags = [ + "-Aclippy::all", + "-Dclippy::correctness", + "-Aclippy::if-same-then-else", + "-Aclippy::clone-double-ref" +] diff --git a/.dockerignore b/.dockerignore index 39dbc05c97e16..c58599e3fb72b 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,3 +4,4 @@ doc Dockerfile .dockerignore .local +.env* diff --git a/.gitignore b/.gitignore index 0486a1a716e5c..f1103fdab93a5 100644 --- a/.gitignore +++ b/.gitignore @@ -21,7 +21,7 @@ rls*.log .local **/hfuzz_target/ **/hfuzz_workspace/ -.cargo/ .cargo-remote.toml *.bin *.iml +.maintain/node-template-release/Cargo.lock diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index c5cb6c571b475..6d4362ea93629 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -45,6 +45,8 @@ variables: &default-vars VAULT_SERVER_URL: "https://vault.parity-mgmt-vault.parity.io" VAULT_AUTH_PATH: "gitlab-parity-io-jwt" VAULT_AUTH_ROLE: "cicd_gitlab_parity_${CI_PROJECT_NAME}" + SIMNET_FEATURES_PATH: "simnet_tests/tests" + PIPELINE_SCRIPTS_TAG: "v0.1" default: cache: {} @@ -57,6 +59,14 @@ default: paths: - artifacts/ +.collect-artifacts-short: &collect-artifacts-short + artifacts: + name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" + when: on_success + expire_in: 3 hours + paths: + - artifacts/ + .kubernetes-env: &kubernetes-env retry: max: 2 @@ -168,6 +178,21 @@ default: | tee artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA/::trie::read::small.json' - sccache -s +.build-linux-substrate-script: &build-linux-substrate-script + - WASM_BUILD_NO_COLOR=1 time cargo build --release --verbose + - mv ./target/release/substrate 
./artifacts/substrate/. + - echo -n "Substrate version = " + - if [ "${CI_COMMIT_TAG}" ]; then + echo "${CI_COMMIT_TAG}" | tee ./artifacts/substrate/VERSION; + else + ./artifacts/substrate/substrate --version | + sed -n -E 's/^substrate ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p' | + tee ./artifacts/substrate/VERSION; + fi + - sha256sum ./artifacts/substrate/substrate | tee ./artifacts/substrate/substrate.sha256 + - cp -r .maintain/docker/substrate.Dockerfile ./artifacts/substrate/ + - sccache -s + #### Vault secrets .vault-secrets: &vault-secrets secrets: @@ -188,13 +213,13 @@ default: file: false AWX_TOKEN: vault: cicd/gitlab/$CI_PROJECT_PATH/AWX_TOKEN@kv - file: false + file: false CRATES_TOKEN: vault: cicd/gitlab/$CI_PROJECT_PATH/CRATES_TOKEN@kv file: false DOCKER_CHAOS_TOKEN: vault: cicd/gitlab/$CI_PROJECT_PATH/DOCKER_CHAOS_TOKEN@kv - file: false + file: false DOCKER_CHAOS_USER: vault: cicd/gitlab/$CI_PROJECT_PATH/DOCKER_CHAOS_USER@kv file: false @@ -311,7 +336,7 @@ cargo-deny: when: always paths: - deny.log - # FIXME: Temorarily allow to fail. + # FIXME: Temporarily allow to fail. 
allow_failure: true cargo-fmt: @@ -321,6 +346,13 @@ cargo-fmt: script: - cargo +nightly fmt --all -- --check +cargo-clippy: + stage: test + <<: *docker-env + <<: *test-refs + script: + - SKIP_WASM_BUILD=1 env -u RUSTFLAGS cargo +nightly clippy + cargo-check-benches: stage: test <<: *docker-env @@ -522,31 +554,40 @@ cargo-check-macos: #### stage: build -check-polkadot-companion-status: - stage: build - image: paritytech/tools:latest - <<: *kubernetes-env - <<: *vault-secrets - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - script: - - ./.maintain/gitlab/check_polkadot_companion_status.sh - -check-polkadot-companion-build: +.check-dependent-project: &check-dependent-project stage: build <<: *docker-env <<: *test-refs-no-trigger <<: *vault-secrets - needs: - - job: test-linux-stable-int - artifacts: false script: - - ./.maintain/gitlab/check_polkadot_companion_build.sh - after_script: - - cd polkadot && git rev-parse --abbrev-ref HEAD - allow_failure: true + - git clone + --depth=1 + "--branch=$PIPELINE_SCRIPTS_TAG" + https://github.com/paritytech/pipeline-scripts + - ./pipeline-scripts/check_dependent_project.sh + paritytech + substrate + --substrate + "$DEPENDENT_REPO" + "$GITHUB_PR_TOKEN" + - cd "$DEPENDENT_REPO" && git rev-parse --abbrev-ref HEAD + +# Individual jobs are set up for each dependent project so that they can be ran in parallel. +# Arguably we could generate a job for each companion in the PR's description using Gitlab's +# parent-child pipelines but that's more complicated. 
+ +check-dependent-polkadot: + <<: *check-dependent-project + variables: + DEPENDENT_REPO: polkadot + +check-dependent-cumulus: + <<: *check-dependent-project + variables: + DEPENDENT_REPO: cumulus -build-linux-substrate: &build-binary + +build-linux-substrate: stage: build <<: *collect-artifacts <<: *docker-env @@ -557,21 +598,21 @@ build-linux-substrate: &build-binary before_script: - mkdir -p ./artifacts/substrate/ script: - - WASM_BUILD_NO_COLOR=1 time cargo build --release --verbose - - mv ./target/release/substrate ./artifacts/substrate/. - - echo -n "Substrate version = " - - if [ "${CI_COMMIT_TAG}" ]; then - echo "${CI_COMMIT_TAG}" | tee ./artifacts/substrate/VERSION; - else - ./artifacts/substrate/substrate --version | - sed -n -E 's/^substrate ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p' | - tee ./artifacts/substrate/VERSION; - fi - - sha256sum ./artifacts/substrate/substrate | tee ./artifacts/substrate/substrate.sha256 + - *build-linux-substrate-script - printf '\n# building node-template\n\n' - ./.maintain/node-template-release.sh ./artifacts/substrate/substrate-node-template.tar.gz - - cp -r .maintain/docker/substrate.Dockerfile ./artifacts/substrate/ - - sccache -s + + +#Build binary for simnet quick tests. 
+build-linux-substrate-simnet: + stage: build + <<: *collect-artifacts-short + <<: *docker-env + <<: *test-refs-no-trigger-prs-only + before_script: + - mkdir -p ./artifacts/substrate/ + script: + - *build-linux-substrate-script build-linux-subkey: &build-subkey stage: build @@ -682,6 +723,25 @@ publish-docker-substrate: # https://docs.gitlab.com/ee/ci/multi_project_pipelines.html#with-variable-inheritance dotenv: ./artifacts/substrate/build.env +#publish artifact for quick simnet tests +publish-docker-substrate-simnet: + stage: publish + <<: *build-push-docker-image + <<: *test-refs-no-trigger-prs-only + needs: + - job: build-linux-substrate-simnet + artifacts: true + variables: + <<: *default-vars + GIT_STRATEGY: none + DOCKERFILE: $PRODUCT.Dockerfile + PRODUCT: substrate + IMAGE_NAME: docker.io/paritypr/$PRODUCT + artifacts: + reports: + # this artifact is used in simnet-tests-quick job + dotenv: ./artifacts/substrate/build.env + publish-docker-subkey: stage: publish <<: *build-push-docker-image @@ -763,12 +823,14 @@ publish-rustdoc: - cp README.md /tmp/doc/ - git checkout gh-pages # Remove directories no longer necessary, as specified in $RUSTDOCS_DEPLOY_REFS. - # Also ensure $RUSTDOCS_DEPLOY_REFS is non-space + # Also ensure $RUSTDOCS_DEPLOY_REFS is not just empty spaces. + # Even though this block spans multiple lines, they are concatenated to run as a single line + # command, so note for the semi-colons in the inner-most code block. - if [[ ! -z ${RUSTDOCS_DEPLOY_REFS// } ]]; then for FILE in *; do if [[ ! " $RUSTDOCS_DEPLOY_REFS " =~ " $FILE " ]]; then - echo "Removing ${FILE}..." - rm -rf $FILE + echo "Removing ${FILE}..."; + rm -rf $FILE; fi done fi @@ -841,7 +903,7 @@ deploy-prometheus-alerting-rules: # Runs "quick" and "long" tests on nightly schedule and on commit / merge to master # A "quick" test is a smoke test where basic check-expect tests run by # checking values from metrics exposed by the app. 
-# A "long" test is the load testing where we send 50K transactions into the +# A "long" test is the load testing where we send 50K transactions into the # network and check if all completed successfully simnet-tests: stage: deploy @@ -862,7 +924,7 @@ simnet-tests: before_script: - echo "Simnet Tests Config docker.io/paritytech/simnet:${SIMNET_REF} - ${SUBSTRATE_IMAGE_NAME} ${SUBSTRATE_IAMGE_TAG}" + ${SUBSTRATE_IMAGE_NAME} ${SUBSTRATE_IMAGE_TAG}" script: - /home/nonroot/simnet/gurke/scripts/run-test-environment-manager.sh --github-remote-dir="https://github.com/paritytech/substrate/tree/master/simnet_tests" @@ -870,4 +932,31 @@ simnet-tests: --image="${SUBSTRATE_IMAGE_NAME}:${SUBSTRATE_IMAGE_TAG}" retry: 2 tags: - - parity-simnet + - substrate-simnet + +#run quick simnet-tests for each PR +simnet-tests-quick: + stage: deploy + image: docker.io/paritytech/simnet:${SIMNET_REF} + <<: *kubernetes-env + <<: *vault-secrets + <<: *test-refs-no-trigger-prs-only + variables: + SIMNET_FEATURES: "${SIMNET_FEATURES_PATH}/quick" + needs: + - job: publish-docker-substrate-simnet + before_script: + - echo "Simnet Tests Config + docker.io/paritytech/simnet:${SIMNET_REF} + ${SUBSTRATE_IMAGE_NAME} ${SUBSTRATE_IMAGE_TAG}" + script: + - echo "Image=${SUBSTRATE_IMAGE_NAME}:${SUBSTRATE_IMAGE_TAG}" + - echo "Features=${SIMNET_FEATURES}" + - /home/nonroot/simnet/gurke/scripts/run-test-environment-manager.sh + --github-remote-dir="https://github.com/paritytech/substrate/tree/master/simnet_tests" + --config="simnet_tests/configs/default_local_testnet.toml" + --image="${SUBSTRATE_IMAGE_NAME}:${SUBSTRATE_IMAGE_TAG}" + --features="${SIMNET_FEATURES}" + tags: + - substrate-simnet + diff --git a/.maintain/Dockerfile b/.maintain/Dockerfile deleted file mode 100644 index 21a41720f7d65..0000000000000 --- a/.maintain/Dockerfile +++ /dev/null @@ -1,59 +0,0 @@ -# Note: We don't use Alpine and its packaged Rust/Cargo because they're too often out of date, -# preventing them from being used to build 
Substrate/Polkadot. - -FROM phusion/baseimage:0.11 as builder -LABEL maintainer="chevdor@gmail.com" -LABEL description="This is the build stage for Substrate. Here we create the binary." - -ENV DEBIAN_FRONTEND=noninteractive - -ARG PROFILE=release -WORKDIR /substrate - -COPY . /substrate - -RUN apt-get update && \ - apt-get dist-upgrade -y -o Dpkg::Options::="--force-confold" && \ - apt-get install -y cmake pkg-config libssl-dev git clang - -RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && \ - export PATH="$PATH:$HOME/.cargo/bin" && \ - rustup toolchain install nightly && \ - rustup target add wasm32-unknown-unknown --toolchain nightly && \ - rustup default stable && \ - cargo build "--$PROFILE" - -# ===== SECOND STAGE ====== - -FROM phusion/baseimage:0.11 -LABEL maintainer="chevdor@gmail.com" -LABEL description="This is the 2nd stage: a very small image where we copy the Substrate binary." -ARG PROFILE=release - -RUN mv /usr/share/ca* /tmp && \ - rm -rf /usr/share/* && \ - mv /tmp/ca-certificates /usr/share/ && \ - useradd -m -u 1000 -U -s /bin/sh -d /substrate substrate && \ - mkdir -p /substrate/.local/share/substrate && \ - chown -R substrate:substrate /substrate/.local && \ - ln -s /substrate/.local/share/substrate /data - -COPY --from=builder /substrate/target/$PROFILE/substrate /usr/local/bin -COPY --from=builder /substrate/target/$PROFILE/subkey /usr/local/bin -COPY --from=builder /substrate/target/$PROFILE/node-rpc-client /usr/local/bin -COPY --from=builder /substrate/target/$PROFILE/node-template /usr/local/bin -COPY --from=builder /substrate/target/$PROFILE/chain-spec-builder /usr/local/bin - -# checks -RUN ldd /usr/local/bin/substrate && \ - /usr/local/bin/substrate --version - -# Shrinking -RUN rm -rf /usr/lib/python* && \ - rm -rf /usr/bin /usr/sbin /usr/share/man - -USER substrate -EXPOSE 30333 9933 9944 9615 -VOLUME ["/data"] - -CMD ["/usr/local/bin/substrate"] diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh 
b/.maintain/gitlab/check_polkadot_companion_build.sh deleted file mode 100755 index 72bfaf7151522..0000000000000 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env sh -# -# check if a pr is compatible with polkadot companion pr or master if not -# available -# -# to override one that was just mentioned mark companion pr in the body of the -# polkadot pr like -# -# polkadot companion: paritytech/polkadot#567 -# - -set -e - -github_api_substrate_pull_url="https://api.github.com/repos/paritytech/substrate/pulls" -# use github api v3 in order to access the data without authentication -github_header="Authorization: token ${GITHUB_PR_TOKEN}" - -boldprint () { printf "|\n| \033[1m${@}\033[0m\n|\n" ; } -boldcat () { printf "|\n"; while read l; do printf "| \033[1m${l}\033[0m\n"; done; printf "|\n" ; } - - - -boldcat <<-EOT - - -check_polkadot_companion_build -============================== - -this job checks if there is a string in the description of the pr like - -polkadot companion: paritytech/polkadot#567 - - -it will then run cargo check from this polkadot's branch with substrate code -from this pull request. otherwise, it will uses master instead - - -EOT - -# Set the user name and email to make merging work -git config --global user.name 'CI system' -git config --global user.email '<>' - -# Merge master into our branch before building Polkadot to make sure we don't miss -# any commits that are required by Polkadot. -git fetch --depth 100 origin -git merge origin/master - -# Clone the current Polkadot master branch into ./polkadot. -# NOTE: we need to pull enough commits to be able to find a common -# ancestor for successfully performing merges below. 
-git clone --depth 20 https://github.com/paritytech/polkadot.git - -cd polkadot - -# either it's a pull request then check for a companion otherwise use -# polkadot:master -if expr match "${CI_COMMIT_REF_NAME}" '^[0-9]\+$' >/dev/null -then - boldprint "this is pull request no ${CI_COMMIT_REF_NAME}" - - pr_data_file="$(mktemp)" - # get the last reference to a pr in polkadot - curl -sSL -H "${github_header}" -o "${pr_data_file}" \ - "${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME}" - - pr_body="$(sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p' "${pr_data_file}")" - - pr_companion="$(echo "${pr_body}" | sed -n -r \ - -e 's;^.*[Cc]ompanion.*paritytech/polkadot#([0-9]+).*$;\1;p' \ - -e 's;^.*[Cc]ompanion.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ - | tail -n 1)" - - if [ "${pr_companion}" ] - then - boldprint "companion pr specified/detected: #${pr_companion}" - git fetch origin refs/pull/${pr_companion}/head:pr/${pr_companion} - git checkout pr/${pr_companion} - git merge origin/master - else - boldprint "no companion branch found - building polkadot:master" - fi - rm -f "${pr_data_file}" -else - boldprint "this is not a pull request - building polkadot:master" -fi - -# Patch all Substrate crates in Polkadot -diener patch --crates-to-patch ../ --substrate --path Cargo.toml - -# We need to update specifically our patched Substrate crates so that other -# crates that depend on them (e.g. Polkadot, BEEFY) use this unified version -# NOTE: There's no way to only update patched crates, so we use a heuristic -# of updating a crucial Substrate crate (`sp-core`) to minimize the impact of -# updating unrelated dependencies -cargo update -p sp-core - -# Test Polkadot pr or master branch with this Substrate commit. 
-time cargo test --workspace --release --verbose --features=runtime-benchmarks diff --git a/.maintain/gitlab/check_polkadot_companion_status.sh b/.maintain/gitlab/check_polkadot_companion_status.sh deleted file mode 100755 index e0412c7b7bec7..0000000000000 --- a/.maintain/gitlab/check_polkadot_companion_status.sh +++ /dev/null @@ -1,102 +0,0 @@ -#!/bin/sh -# -# check for a polkadot companion pr and ensure it has approvals and is -# mergeable -# - -github_api_substrate_pull_url="https://api.github.com/repos/paritytech/substrate/pulls" -github_api_polkadot_pull_url="https://api.github.com/repos/paritytech/polkadot/pulls" -# use github api v3 in order to access the data without authentication -github_header="Authorization: token ${GITHUB_PR_TOKEN}" - -boldprint () { printf "|\n| \033[1m${@}\033[0m\n|\n" ; } -boldcat () { printf "|\n"; while read l; do printf "| \033[1m${l}\033[0m\n"; done; printf "|\n" ; } - - - -boldcat <<-EOT - - -check_polkadot_companion_status -=============================== - -this job checks if there is a string in the description of the pr like - -polkadot companion: paritytech/polkadot#567 - -and checks its status. - - -EOT - - -if ! 
[ "${CI_COMMIT_REF_NAME}" -gt 0 2>/dev/null ] -then - boldprint "this doesn't seem to be a pull request" - exit 1 -fi - -boldprint "this is pull request no ${CI_COMMIT_REF_NAME}" - -pr_body="$(curl -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME} \ - | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" - -# get companion if explicitly specified -pr_companion="$(echo "${pr_body}" | sed -n -r \ - -e 's;^.*[Cc]ompanion.*paritytech/polkadot#([0-9]+).*$;\1;p' \ - -e 's;^.*[Cc]ompanion.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ - | tail -n 1)" - -if [ -z "${pr_companion}" ] -then - boldprint "no companion pr found" - exit 0 -fi - -boldprint "companion pr: #${pr_companion}" - -# check the status of that pull request - needs to be -# approved and mergable - -curl -H "${github_header}" -sS -o companion_pr.json \ - ${github_api_polkadot_pull_url}/${pr_companion} - -pr_head_sha=$(jq -r -e '.head.sha' < companion_pr.json) -boldprint "Polkadot PR's HEAD SHA: $pr_head_sha" - -curl -H "${github_header}" -sS -o companion_pr_reviews.json \ - ${github_api_polkadot_pull_url}/${pr_companion}/reviews - -# If there are any 'CHANGES_REQUESTED' reviews for the *current* review -jq -r -e '.[] | select(.state == "CHANGES_REQUESTED").commit_id' \ - < companion_pr_reviews.json > companion_pr_reviews_current.json -while IFS= read -r line; do - if [ "$line" = "$pr_head_sha" ]; then - boldprint "polkadot pr #${pr_companion} has CHANGES_REQUESTED for the latest commit" - exit 1 - fi -done < companion_pr_reviews_current.json - -# Then we check for at least 1 APPROVED -if [ -z "$(jq -r -e '.[].state | select(. 
== "APPROVED")' < companion_pr_reviews.json)" ]; then - boldprint "polkadot pr #${pr_companion} not APPROVED" - exit 1 -fi - -boldprint "polkadot pr #${pr_companion} state APPROVED" - -if jq -e .merged < companion_pr.json >/dev/null -then - boldprint "polkadot pr #${pr_companion} already merged" - exit 0 -fi - -if jq -e '.mergeable' < companion_pr.json >/dev/null -then - boldprint "polkadot pr #${pr_companion} mergeable" -else - boldprint "polkadot pr #${pr_companion} not mergeable" - exit 1 -fi - -exit 0 diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml index 7a69cba66c3f3..2711610024330 100644 --- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml +++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml @@ -133,16 +133,6 @@ groups: # Others ############################################################################## - - alert: ContinuousTaskEnded - expr: '(polkadot_tasks_spawned_total{task_name != "basic-authorship-proposer", task_name != "substrate-rpc-subscription"} == 1) - - on(instance, task_name) group_left() (polkadot_tasks_ended_total == 1)' - for: 5m - labels: - severity: warning - annotations: - message: 'Continuous task {{ $labels.task_name }} on node - {{ $labels.instance }} ended unexpectedly.' 
- - alert: AuthorityDiscoveryDiscoveryFailureHigh expr: 'polkadot_authority_discovery_handle_value_found_event_failure / ignoring(name) diff --git a/Cargo.lock b/Cargo.lock index be369c670c0b8..f15e363bfd6ae 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14,20 +14,11 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55f82cfe485775d02112886f4169bde0c5894d75e79ead7eafe7e40a25e45f7" -dependencies = [ - "gimli 0.23.0", -] - -[[package]] -name = "addr2line" -version = "0.15.1" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03345e98af8f3d786b6d9f656ccfa6ac316d954e92bc4841f0bba20789d5fb5a" +checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd" dependencies = [ - "gimli 0.24.0", + "gimli 0.25.0", ] [[package]] @@ -53,7 +44,7 @@ checksum = "495ee669413bfbe9e8cace80f4d3d78e6d8c8d99579f97fb93bde351b185f2d4" dependencies = [ "cfg-if 1.0.0", "cipher", - "cpufeatures", + "cpufeatures 0.1.5", "opaque-debug 0.3.0", ] @@ -84,9 +75,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.15" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5" +checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" dependencies = [ "memchr", ] @@ -165,9 +156,9 @@ checksum = "9d6e24d2cce90c53b948c46271bfb053e4bdc2db9b5d3f65e20f8cf28a1b7fc3" [[package]] name = "assert_cmd" -version = "1.0.3" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2475b58cd94eb4f70159f4fd8844ba3b807532fe3131b3373fae060bbe30396" +checksum = "e996dc7940838b7ef1096b882e29ec30a3149a3a443cdc8dba19ed382eca1fe2" dependencies = [ "bstr", "doc-comment", @@ -291,9 +282,9 @@ dependencies = [ [[package]] name = "async-std" -version = "1.9.0" +version = "1.10.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341" +checksum = "f8056f1455169ab86dd47b47391e4ab0cbd25410a70e9fe675544f49bafaf952" dependencies = [ "async-attributes", "async-channel", @@ -301,7 +292,7 @@ dependencies = [ "async-io", "async-lock", "async-process", - "crossbeam-utils 0.8.3", + "crossbeam-utils 0.8.5", "futures-channel", "futures-core", "futures-io", @@ -415,15 +406,16 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "backtrace" -version = "0.3.56" +version = "0.3.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d117600f438b1707d4e4ae15d3595657288f8235a0eb593e80ecc98ab34e1bc" +checksum = "e7a905d892734eea339e896738c14b9afce22b5318f64b951e70bf3844419b01" dependencies = [ - "addr2line 0.14.1", + "addr2line", + "cc", "cfg-if 1.0.0", "libc", "miniz_oxide", - "object 0.23.0", + "object 0.26.0", "rustc-demangle", ] @@ -448,9 +440,9 @@ checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" [[package]] name = "base58" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" +checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" [[package]] name = "base64" @@ -492,6 +484,80 @@ dependencies = [ "serde", ] +[[package]] +name = "beefy-gadget" +version = "4.0.0-dev" +dependencies = [ + "beefy-primitives", + "fnv", + "futures 0.3.16", + "log 0.4.14", + "parity-scale-codec", + "parking_lot 0.11.1", + "sc-client-api", + "sc-keystore", + "sc-network", + "sc-network-gossip", + "sc-network-test", + "sc-utils", + "sp-api", + "sp-application-crypto", + "sp-arithmetic", + "sp-blockchain", + "sp-core", + "sp-keystore", + "sp-runtime", + "strum 0.21.0", + "substrate-prometheus-endpoint", + "thiserror", + "wasm-timer", +] + +[[package]] 
+name = "beefy-gadget-rpc" +version = "4.0.0-dev" +dependencies = [ + "beefy-gadget", + "beefy-primitives", + "futures 0.3.16", + "jsonrpc-core", + "jsonrpc-core-client", + "jsonrpc-derive", + "jsonrpc-pubsub", + "log 0.4.14", + "parity-scale-codec", + "sc-rpc", + "serde", + "sp-core", + "sp-runtime", +] + +[[package]] +name = "beefy-merkle-tree" +version = "4.0.0-dev" +dependencies = [ + "env_logger 0.9.0", + "hex", + "hex-literal", + "log 0.4.14", + "tiny-keccak", +] + +[[package]] +name = "beefy-primitives" +version = "4.0.0-dev" +dependencies = [ + "hex-literal", + "parity-scale-codec", + "scale-info", + "sp-api", + "sp-application-crypto", + "sp-core", + "sp-keystore", + "sp-runtime", + "sp-std", +] + [[package]] name = "bincode" version = "1.3.2" @@ -818,7 +884,7 @@ checksum = "fee7ad89dc1128635074c268ee661f90c3f7e83d9fd12910608c36b47d6c3412" dependencies = [ "cfg-if 1.0.0", "cipher", - "cpufeatures", + "cpufeatures 0.1.5", "zeroize", ] @@ -976,6 +1042,15 @@ dependencies = [ "libc", ] +[[package]] +name = "cpufeatures" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +dependencies = [ + "libc", +] + [[package]] name = "cpuid-bool" version = "0.1.2" @@ -993,11 +1068,11 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.74.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8ca3560686e7c9c7ed7e0fe77469f2410ba5d7781b1acaa9adc8d8deea28e3e" +checksum = "15013642ddda44eebcf61365b2052a23fd8b7314f90ba44aa059ec02643c5139" dependencies = [ - "cranelift-entity 0.74.0", + "cranelift-entity 0.77.0", ] [[package]] @@ -1014,26 +1089,25 @@ dependencies = [ "gimli 0.22.0", "log 0.4.14", "regalloc", - "smallvec 1.6.1", + "smallvec 1.7.0", "target-lexicon 0.11.2", "thiserror", ] [[package]] name = "cranelift-codegen" -version = "0.74.0" +version = "0.77.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf9bf1ffffb6ce3d2e5ebc83549bd2436426c99b31cc550d521364cbe35d276" +checksum = "298f2a7ed5fdcb062d8e78b7496b0f4b95265d20245f2d0ca88f846dd192a3a3" dependencies = [ - "cranelift-bforest 0.74.0", - "cranelift-codegen-meta 0.74.0", - "cranelift-codegen-shared 0.74.0", - "cranelift-entity 0.74.0", - "gimli 0.24.0", + "cranelift-bforest 0.77.0", + "cranelift-codegen-meta 0.77.0", + "cranelift-codegen-shared 0.77.0", + "cranelift-entity 0.77.0", + "gimli 0.25.0", "log 0.4.14", "regalloc", - "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "target-lexicon 0.12.0", ] @@ -1049,12 +1123,12 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.74.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cc21936a5a6d07e23849ffe83e5c1f6f50305c074f4b2970ca50c13bf55b821" +checksum = "5cf504261ac62dfaf4ffb3f41d88fd885e81aba947c1241275043885bc5f0bac" dependencies = [ - "cranelift-codegen-shared 0.74.0", - "cranelift-entity 0.74.0", + "cranelift-codegen-shared 0.77.0", + "cranelift-entity 0.77.0", ] [[package]] @@ -1065,12 +1139,9 @@ checksum = "6759012d6d19c4caec95793f052613e9d4113e925e7f14154defbac0f1d4c938" [[package]] name = "cranelift-codegen-shared" -version = "0.74.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca5b6ffaa87560bebe69a5446449da18090b126037920b0c1c6d5945f72faf6b" -dependencies = [ - "serde", -] +checksum = "1cd2a72db4301dbe7e5a4499035eedc1e82720009fb60603e20504d8691fa9cd" [[package]] name = "cranelift-entity" @@ -1083,9 +1154,9 @@ dependencies = [ [[package]] name = "cranelift-entity" -version = "0.74.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d6b4a8bef04f82e4296782646f733c641d09497df2fabf791323fefaa44c64c" +checksum = "48868faa07cacf948dc4a1773648813c0e453ff9467e800ff10f6a78c021b546" dependencies = [ "serde", ] @@ 
-1098,47 +1169,47 @@ checksum = "b608bb7656c554d0a4cf8f50c7a10b857e80306f6ff829ad6d468a7e2323c8d8" dependencies = [ "cranelift-codegen 0.68.0", "log 0.4.14", - "smallvec 1.6.1", + "smallvec 1.7.0", "target-lexicon 0.11.2", ] [[package]] name = "cranelift-frontend" -version = "0.74.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b783b351f966fce33e3c03498cb116d16d97a8f9978164a60920bd0d3a99c" +checksum = "351c9d13b4ecd1a536215ec2fd1c3ee9ee8bc31af172abf1e45ed0adb7a931df" dependencies = [ - "cranelift-codegen 0.74.0", + "cranelift-codegen 0.77.0", "log 0.4.14", - "smallvec 1.6.1", + "smallvec 1.7.0", "target-lexicon 0.12.0", ] [[package]] name = "cranelift-native" -version = "0.74.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c88d3dd48021ff1e37e978a00098524abd3513444ae252c08d37b310b3d2a" +checksum = "6df8b556663d7611b137b24db7f6c8d9a8a27d7f29c7ea7835795152c94c1b75" dependencies = [ - "cranelift-codegen 0.74.0", + "cranelift-codegen 0.77.0", + "libc", "target-lexicon 0.12.0", ] [[package]] name = "cranelift-wasm" -version = "0.74.0" +version = "0.77.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb6d408e2da77cdbbd65466298d44c86ae71c1785d2ab0d8657753cdb4d9d89" +checksum = "7a69816d90db694fa79aa39b89dda7208a4ac74b6f2b8f3c4da26ee1c8bdfc5e" dependencies = [ - "cranelift-codegen 0.74.0", - "cranelift-entity 0.74.0", - "cranelift-frontend 0.74.0", - "itertools 0.10.0", + "cranelift-codegen 0.77.0", + "cranelift-entity 0.77.0", + "cranelift-frontend 0.77.0", + "itertools", "log 0.4.14", - "serde", - "smallvec 1.6.1", - "thiserror", - "wasmparser 0.78.2", + "smallvec 1.7.0", + "wasmparser 0.80.2", + "wasmtime-types", ] [[package]] @@ -1152,16 +1223,17 @@ dependencies = [ [[package]] name = "criterion" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ab327ed7354547cc2ef43cbe20ef68b988e70b4b593cbd66a2a61733123a3d23" +checksum = "1604dafd25fba2fe2d5895a9da139f8dc9b319a5fe5354ca137cbbce4e178d10" dependencies = [ "atty", "cast", "clap", "criterion-plot", "csv", - "itertools 0.10.0", + "futures 0.3.16", + "itertools", "lazy_static", "num-traits", "oorandom", @@ -1173,17 +1245,18 @@ dependencies = [ "serde_derive", "serde_json", "tinytemplate", + "tokio", "walkdir", ] [[package]] name = "criterion-plot" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e022feadec601fba1649cfa83586381a4ad31c6bf3a9ab7d408118b05dd9889d" +checksum = "d00996de9f2f7559f7f4dc286073197f83e92256a59ed395f9aac01fe717da57" dependencies = [ "cast", - "itertools 0.9.0", + "itertools", ] [[package]] @@ -1193,28 +1266,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dca26ee1f8d361640700bde38b2c37d8c22b3ce2d360e1fc1c74ea4b0aa7d775" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.3", + "crossbeam-utils 0.8.5", ] [[package]] name = "crossbeam-deque" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94af6efb46fef72616855b036a624cf27ba656ffc9be1b9a3c931cfc7749a9a9" +checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.3", + "crossbeam-utils 0.8.5", ] [[package]] name = "crossbeam-epoch" -version = "0.9.3" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2584f639eb95fea8c798496315b297cf81b9b58b6d30ab066a75455333cf4b12" +checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.3", + "crossbeam-utils 0.8.5", "lazy_static", "memoffset", "scopeguard", @@ -1233,11 +1306,10 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.3" +version = "0.8.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7e9d99fa91428effe99c5c6d4634cdeba32b8cf784fc428a2a687f61a952c49" +checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" dependencies = [ - "autocfg 1.0.1", "cfg-if 1.0.0", "lazy_static", ] @@ -1434,10 +1506,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" [[package]] -name = "difference" -version = "2.0.0" +name = "difflib" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524cbf6897b527295dff137cec09ecf3a05f4fddffd7dfcd1585403449e74198" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" [[package]] name = "digest" @@ -1598,7 +1670,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.3", + "sha2 0.9.8", "zeroize", ] @@ -1703,15 +1775,6 @@ version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" -[[package]] -name = "erased-serde" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0465971a8cc1fa2455c8465aaa377131e1f1cf4983280f474a13e68793aa770c" -dependencies = [ - "serde", -] - [[package]] name = "errno" version = "0.2.7" @@ -1982,10 +2045,10 @@ dependencies = [ "parity-scale-codec", "parity-util-mem", "paste 1.0.4", - "pretty_assertions 0.6.1", + "pretty_assertions", "scale-info", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "sp-arithmetic", "sp-core", "sp-inherents", @@ -2036,7 +2099,7 @@ dependencies = [ "frame-support-test-pallet", "frame-system", "parity-scale-codec", - "pretty_assertions 0.6.1", + "pretty_assertions", "rustversion", "scale-info", "serde", @@ -2355,8 +2418,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" 
dependencies = [ "cfg-if 1.0.0", + "js-sys", "libc", "wasi 0.9.0+wasi-snapshot-preview1", + "wasm-bindgen", ] [[package]] @@ -2395,15 +2460,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.23.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6503fe142514ca4799d4c26297c4248239fe8838d827db6bd6065c6ed29a6ce" - -[[package]] -name = "gimli" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4075386626662786ddb0ec9081e7c7eeb1ba31951f447ca780ef9f5d568189" +checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7" dependencies = [ "fallible-iterator", "indexmap", @@ -2412,9 +2471,9 @@ dependencies = [ [[package]] name = "git2" -version = "0.13.21" +version = "0.13.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "659cd14835e75b64d9dba5b660463506763cf0aa6cb640aeeb0e98d841093490" +checksum = "9c1cbbfc9a1996c6af82c2b4caf828d2c653af4fcdbb0e5674cc966eee5a4197" dependencies = [ "bitflags", "libc", @@ -2531,9 +2590,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hex-literal" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5af1f635ef1bc545d78392b136bfe1c9809e029023c84a3638a864a10b8819c8" +checksum = "21e4590e13640f19f249fe3e4eca5113bc4289f2497710378190e7f4bd96f45b" [[package]] name = "hex_fmt" @@ -2561,17 +2620,6 @@ dependencies = [ "digest 0.9.0", ] -[[package]] -name = "hmac-drbg" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" -dependencies = [ - "digest 0.8.1", - "generic-array 0.12.4", - "hmac 0.7.1", -] - [[package]] name = "hmac-drbg" version = "0.3.0" @@ -2892,15 +2940,6 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" -[[package]] -name = "itertools" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.10.0" @@ -2927,9 +2966,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.50" +version = "0.3.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d99f9e3e84b8f67f846ef5b4cbbc3b1c29f6c759fcbce6f01aa0e73d932a24c" +checksum = "1866b355d9c878e5e607473cbe3f63282c0b7aad2db1dbebf55076c686918254" dependencies = [ "wasm-bindgen", ] @@ -3072,9 +3111,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f37924e16300e249a52a22cabb5632f846dc9760b39355f5e8bc70cd23dc6300" +checksum = "8edb341d35279b59c79d7fe9e060a51aec29d45af99cc7c72ea7caa350fa71a4" dependencies = [ "Inflector", "bae", @@ -3086,9 +3125,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d67724d368c59e08b557a516cf8fcc51100e7a708850f502e1044b151fe89788" +checksum = "4cc738fd55b676ada3271ef7c383a14a0867a2a88b0fa941311bf5fc0a29d498" dependencies = [ "async-trait", "beef", @@ -3104,16 +3143,16 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e2834b6e7f57ce9a4412ed4d6dc95125d2c8612e68f86b9d9a07369164e4198" +checksum = "9841352dbecf4c2ed5dc71698df9f1660262ae4e0b610e968602529bdbcf7b30" dependencies = [ "async-trait", "fnv", "futures 0.3.16", "jsonrpsee-types", "log 0.4.14", - "pin-project 1.0.5", + "pin-project 1.0.8", "rustls", "rustls-native-certs", "serde", @@ 
-3169,7 +3208,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "45a3f58dc069ec0e205a27f5b45920722a46faed802a0541538241af6228f512" dependencies = [ "parity-util-mem", - "smallvec 1.6.1", + "smallvec 1.7.0", ] [[package]] @@ -3198,7 +3237,7 @@ dependencies = [ "parking_lot 0.11.1", "regex", "rocksdb", - "smallvec 1.6.1", + "smallvec 1.7.0", ] [[package]] @@ -3227,15 +3266,15 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.95" +version = "0.2.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "789da6d93f1b866ffe175afc5322a4d76c038605a1c3319bb57b06967ca98a36" +checksum = "dd8f7255a17a627354f321ef0055d63b898c6fb27eff628af4d1b66b7331edf6" [[package]] name = "libgit2-sys" -version = "0.12.22+1.1.0" +version = "0.12.23+1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89c53ac117c44f7042ad8d8f5681378dfbc6010e49ec2c0d1f11dfedc7a4a1c3" +checksum = "29730a445bae719db3107078b46808cc45a5b7a6bae3f31272923af969453356" dependencies = [ "cc", "libc", @@ -3313,8 +3352,8 @@ dependencies = [ "libp2p-yamux", "multiaddr", "parking_lot 0.11.1", - "pin-project 1.0.5", - "smallvec 1.6.1", + "pin-project 1.0.8", + "smallvec 1.7.0", "wasm-timer", ] @@ -3338,14 +3377,14 @@ dependencies = [ "multihash 0.14.0", "multistream-select", "parking_lot 0.11.1", - "pin-project 1.0.5", + "pin-project 1.0.8", "prost", "prost-build", "rand 0.7.3", "ring", "rw-stream-sink", - "sha2 0.9.3", - "smallvec 1.6.1", + "sha2 0.9.8", + "smallvec 1.7.0", "thiserror", "unsigned-varint 0.7.0", "void", @@ -3373,7 +3412,7 @@ dependencies = [ "futures 0.3.16", "libp2p-core", "log 0.4.14", - "smallvec 1.6.1", + "smallvec 1.7.0", "trust-dns-resolver", ] @@ -3392,7 +3431,7 @@ dependencies = [ "prost", "prost-build", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", ] [[package]] @@ -3415,8 +3454,8 @@ dependencies = [ "prost-build", "rand 0.7.3", 
"regex", - "sha2 0.9.3", - "smallvec 1.6.1", + "sha2 0.9.8", + "smallvec 1.7.0", "unsigned-varint 0.7.0", "wasm-timer", ] @@ -3433,7 +3472,7 @@ dependencies = [ "log 0.4.14", "prost", "prost-build", - "smallvec 1.6.1", + "smallvec 1.7.0", "wasm-timer", ] @@ -3455,8 +3494,8 @@ dependencies = [ "prost", "prost-build", "rand 0.7.3", - "sha2 0.9.3", - "smallvec 1.6.1", + "sha2 0.9.8", + "smallvec 1.7.0", "uint", "unsigned-varint 0.7.0", "void", @@ -3479,7 +3518,7 @@ dependencies = [ "libp2p-swarm", "log 0.4.14", "rand 0.8.4", - "smallvec 1.6.1", + "smallvec 1.7.0", "socket2 0.4.0", "void", ] @@ -3498,7 +3537,7 @@ dependencies = [ "nohash-hasher", "parking_lot 0.11.1", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "unsigned-varint 0.7.0", ] @@ -3517,7 +3556,7 @@ dependencies = [ "prost", "prost-build", "rand 0.8.4", - "sha2 0.9.3", + "sha2 0.9.8", "snow", "static_assertions", "x25519-dalek", @@ -3564,7 +3603,7 @@ checksum = "07cb4dd4b917e5b40ddefe49b96b07adcd8d342e0317011d175b7b2bb1dcc974" dependencies = [ "futures 0.3.16", "log 0.4.14", - "pin-project 1.0.5", + "pin-project 1.0.8", "rand 0.7.3", "salsa20", "sha3", @@ -3583,11 +3622,11 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.14", - "pin-project 1.0.5", + "pin-project 1.0.8", "prost", "prost-build", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "unsigned-varint 0.7.0", "void", "wasm-timer", @@ -3608,7 +3647,7 @@ dependencies = [ "lru", "minicbor", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "unsigned-varint 0.7.0", "wasm-timer", ] @@ -3624,7 +3663,7 @@ dependencies = [ "libp2p-core", "log 0.4.14", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "void", "wasm-timer", ] @@ -3727,56 +3766,57 @@ dependencies = [ [[package]] name = "libsecp256k1" -version = "0.3.5" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" +checksum = 
"bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7" dependencies = [ "arrayref", - "crunchy", - "digest 0.8.1", - "hmac-drbg 0.2.0", + "base64 0.12.3", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core 0.2.2", + "libsecp256k1-gen-ecmult 0.2.1", + "libsecp256k1-gen-genmult 0.2.1", "rand 0.7.3", - "sha2 0.8.2", - "subtle 2.4.0", + "serde", + "sha2 0.9.8", "typenum", ] [[package]] name = "libsecp256k1" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd1137239ab33b41aa9637a88a28249e5e70c40a42ccc92db7f12cc356c1fcd7" +checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73" dependencies = [ "arrayref", "base64 0.12.3", "digest 0.9.0", - "hmac-drbg 0.3.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", + "hmac-drbg", + "libsecp256k1-core 0.2.2", + "libsecp256k1-gen-ecmult 0.2.1", + "libsecp256k1-gen-genmult 0.2.1", "rand 0.7.3", "serde", - "sha2 0.9.3", + "sha2 0.9.8", "typenum", ] [[package]] name = "libsecp256k1" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9d220bc1feda2ac231cb78c3d26f27676b8cf82c96971f7aeef3d0cf2797c73" +checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" dependencies = [ "arrayref", - "base64 0.12.3", + "base64 0.13.0", "digest 0.9.0", - "hmac-drbg 0.3.0", - "libsecp256k1-core", - "libsecp256k1-gen-ecmult", - "libsecp256k1-gen-genmult", - "rand 0.7.3", + "libsecp256k1-core 0.3.0", + "libsecp256k1-gen-ecmult 0.3.0", + "libsecp256k1-gen-genmult 0.3.0", + "rand 0.8.4", "serde", - "sha2 0.9.3", - "typenum", + "sha2 0.9.8", ] [[package]] @@ -3790,13 +3830,33 @@ dependencies = [ "subtle 2.4.0", ] +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + 
"crunchy", + "digest 0.9.0", + "subtle 2.4.0", +] + [[package]] name = "libsecp256k1-gen-ecmult" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccab96b584d38fac86a83f07e659f0deafd0253dc096dab5a36d53efe653c5c3" dependencies = [ - "libsecp256k1-core", + "libsecp256k1-core 0.2.2", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core 0.3.0", ] [[package]] @@ -3805,7 +3865,16 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67abfe149395e3aa1c48a2beb32b068e2334402df8181f818d3aee2b304c4f5d" dependencies = [ - "libsecp256k1-core", + "libsecp256k1-core 0.2.2", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core 0.3.0", ] [[package]] @@ -3991,9 +4060,9 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.3.4" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525" +checksum = "b16bd47d9e329435e309c58469fe0791c2d0d1ba96ec0954152a5ae2b04387dc" [[package]] name = "memmap" @@ -4204,7 +4273,7 @@ dependencies = [ "digest 0.9.0", "generic-array 0.14.4", "multihash-derive", - "sha2 0.9.3", + "sha2 0.9.8", "sha3", "unsigned-varint 0.5.1", ] @@ -4218,7 +4287,7 @@ dependencies = [ "digest 0.9.0", "generic-array 0.14.4", "multihash-derive", - "sha2 0.9.3", + "sha2 0.9.8", "unsigned-varint 0.7.0", ] @@ -4251,8 +4320,8 @@ dependencies = [ "bytes 1.0.1", "futures 0.3.16", "log 0.4.14", - "pin-project 1.0.5", - "smallvec 
1.6.1", + "pin-project 1.0.8", + "smallvec 1.7.0", "unsigned-varint 0.7.0", ] @@ -4388,10 +4457,13 @@ version = "3.0.0-dev" dependencies = [ "assert_cmd", "async-std", + "criterion", "frame-benchmarking-cli", "frame-system", + "frame-system-rpc-runtime-api", "futures 0.3.16", "hex-literal", + "jsonrpsee-ws-client", "log 0.4.14", "nix", "node-executor", @@ -4399,12 +4471,14 @@ dependencies = [ "node-primitives", "node-rpc", "node-runtime", + "pallet-balances", "pallet-im-online", "pallet-transaction-payment", "parity-scale-codec", "platforms", "rand 0.7.3", "regex", + "remote-externalities", "sc-authority-discovery", "sc-basic-authorship", "sc-chain-spec", @@ -4430,6 +4504,7 @@ dependencies = [ "serde", "serde_json", "soketto 0.4.2", + "sp-api", "sp-authority-discovery", "sp-authorship", "sp-consensus", @@ -4449,7 +4524,9 @@ dependencies = [ "substrate-build-script-utils", "substrate-frame-cli", "tempfile", + "tokio", "try-runtime-cli", + "wait-timeout", ] [[package]] @@ -4762,9 +4839,9 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "6.2.1" +version = "6.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c5c51b9083a3c620fa67a2a635d1ce7d95b897e957d6b28ff9a5da960a103a6" +checksum = "e7413f999671bd4745a7b624bd370a569fb6bc574b23c83a3c5ed2e453f3d5e2" dependencies = [ "bitvec 0.19.5", "funty", @@ -4876,28 +4953,20 @@ dependencies = [ [[package]] name = "object" -version = "0.23.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a7ab5d64814df0fe4a4b5ead45ed6c5f181ee3ff04ba344313a6c80446c5d4" - -[[package]] -name = "object" -version = "0.24.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a5b3dd1c072ee7963717671d1ca129f1048fda25edea6b752bfc71ac8854170" +checksum = "c55827317fb4c08822499848a14237d2874d6f139828893017237e7ab93eb386" dependencies = [ "crc32fast", "indexmap", + "memchr", ] 
[[package]] name = "once_cell" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af8b08b04175473088b46763e51ee54da5f9a164bc162f615b91bc179dbf15a3" -dependencies = [ - "parking_lot 0.11.1", -] +checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" [[package]] name = "oorandom" @@ -5114,6 +5183,50 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-beefy" +version = "4.0.0-dev" +dependencies = [ + "beefy-primitives", + "frame-support", + "frame-system", + "pallet-session", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", +] + +[[package]] +name = "pallet-beefy-mmr" +version = "4.0.0-dev" +dependencies = [ + "beefy-merkle-tree", + "beefy-primitives", + "frame-support", + "frame-system", + "hex", + "hex-literal", + "libsecp256k1 0.7.0", + "log 0.4.14", + "pallet-beefy", + "pallet-mmr", + "pallet-mmr-primitives", + "pallet-session", + "parity-scale-codec", + "scale-info", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", +] + [[package]] name = "pallet-bounties" version = "4.0.0-dev" @@ -5158,7 +5271,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", - "libsecp256k1 0.3.5", + "libsecp256k1 0.6.0", "log 0.4.14", "pallet-balances", "pallet-contracts-primitives", @@ -5167,13 +5280,13 @@ dependencies = [ "pallet-timestamp", "pallet-utility", "parity-scale-codec", - "pretty_assertions 0.7.2", + "pretty_assertions", "pwasm-utils", "rand 0.7.3", "rand_pcg 0.2.1", "scale-info", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "sp-core", "sp-io", "sp-runtime", @@ -5904,7 +6017,7 @@ dependencies = [ "scale-info", "serde", "serde_json", - "smallvec 1.6.1", + "smallvec 1.7.0", "sp-core", "sp-io", "sp-runtime", @@ -6045,9 +6158,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "2.2.0" +version = "2.3.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8975095a2a03bbbdc70a74ab11a4f76a6d0b84680d87c68d722531b0ac28e8a9" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" dependencies = [ "arrayvec 0.7.0", "bitvec 0.20.2", @@ -6059,9 +6172,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "2.2.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40dbbfef7f0a1143c5b06e0d76a6278e25dac0bc1af4be51a0fbb73f07e7ad09" +checksum = "1557010476e0595c9b568d16dcfb81b93cdeb157612726f5170d31aa707bed27" dependencies = [ "proc-macro-crate 1.0.0", "proc-macro2", @@ -6101,7 +6214,7 @@ dependencies = [ "parity-util-mem-derive", "parking_lot 0.11.1", "primitive-types", - "smallvec 1.6.1", + "smallvec 1.7.0", "winapi 0.3.9", ] @@ -6166,16 +6279,6 @@ dependencies = [ "rustc_version 0.2.3", ] -[[package]] -name = "parking_lot" -version = "0.10.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3a704eb390aafdc107b0e392f56a82b668e3a71366993b5340f5833fd62505e" -dependencies = [ - "lock_api 0.3.4", - "parking_lot_core 0.7.2", -] - [[package]] name = "parking_lot" version = "0.11.1" @@ -6202,20 +6305,6 @@ dependencies = [ "winapi 0.3.9", ] -[[package]] -name = "parking_lot_core" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58c7c768d4ba344e3e8d72518ac13e259d7c7ade24167003b8488e10b6740a3" -dependencies = [ - "cfg-if 0.1.10", - "cloudabi", - "libc", - "redox_syscall 0.1.57", - "smallvec 1.6.1", - "winapi 0.3.9", -] - [[package]] name = "parking_lot_core" version = "0.8.3" @@ -6226,7 +6315,7 @@ dependencies = [ "instant", "libc", "redox_syscall 0.2.5", - "smallvec 1.6.1", + "smallvec 1.7.0", "winapi 0.3.9", ] @@ -6356,11 +6445,11 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.5" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"96fa8ebb90271c4477f144354485b8068bd8f6b78b428b01ba892ca26caf0b63" +checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" dependencies = [ - "pin-project-internal 1.0.5", + "pin-project-internal 1.0.8", ] [[package]] @@ -6376,9 +6465,9 @@ dependencies = [ [[package]] name = "pin-project-internal" -version = "1.0.5" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "758669ae3558c6f74bd2a18b41f7ac0b5a195aea6639d6a9b5e5d1ad5ba24c0b" +checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" dependencies = [ "proc-macro2", "quote", @@ -6417,9 +6506,9 @@ checksum = "989d43012e2ca1c4a02507c67282691a0a3207f9dc67cec596b43fe925b3d325" [[package]] name = "plotters" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45ca0ae5f169d0917a7c7f5a9c1a3d3d9598f18f529dd2b8373ed988efea307a" +checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a" dependencies = [ "num-traits", "plotters-backend", @@ -6462,7 +6551,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9fcffab1f78ebbdf4b93b68c1ffebc24037eedf271edaca795732b24e5e4e349" dependencies = [ - "cpufeatures", + "cpufeatures 0.1.5", "opaque-debug 0.3.0", "universal-hash", ] @@ -6474,7 +6563,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6ba6a405ef63530d6cb12802014b22f9c5751bd17cdcddbe9e46d5c8ae83287" dependencies = [ "cfg-if 1.0.0", - "cpufeatures", + "cpufeatures 0.1.5", "opaque-debug 0.3.0", "universal-hash", ] @@ -6487,11 +6576,12 @@ checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" [[package]] name = "predicates" -version = "1.0.7" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeb433456c1a57cc93554dea3ce40b4c19c4057e41c55d4a0f3d84ea71c325aa" +checksum = 
"c143348f141cc87aab5b950021bac6145d0e5ae754b0591de23244cee42c9308" dependencies = [ - "difference", + "difflib", + "itertools", "predicates-core", ] @@ -6513,21 +6603,9 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f81e1644e1b54f5a68959a29aa86cde704219254669da328ecfdf6a1f09d427" -dependencies = [ - "ansi_term 0.11.0", - "ctor", - "difference", - "output_vt100", -] - -[[package]] -name = "pretty_assertions" -version = "0.7.2" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cab0e7c02cf376875e9335e0ba1da535775beb5450d21e1dffca068818ed98b" +checksum = "ec0cfe1b2403f172ba0f234e500906ee0a3e493fb81092dac23ebefe129301cc" dependencies = [ "ansi_term 0.12.1", "ctor", @@ -6640,15 +6718,15 @@ dependencies = [ [[package]] name = "prometheus" -version = "0.11.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8425533e7122f0c3cc7a37e6244b16ad3a2cc32ae7ac6276e2a75da0d9c200d" +checksum = "5986aa8d62380092d2f50f8b1cdba9cb9b6731ffd4b25b51fd126b6c3e05b99c" dependencies = [ "cfg-if 1.0.0", "fnv", "lazy_static", + "memchr", "parking_lot 0.11.1", - "regex", "thiserror", ] @@ -6670,7 +6748,7 @@ checksum = "355f634b43cdd80724ee7848f95770e7e70eefa6dcf14fea676216573b8fd603" dependencies = [ "bytes 1.0.1", "heck", - "itertools 0.10.0", + "itertools", "log 0.4.14", "multimap", "petgraph", @@ -6687,7 +6765,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "600d2f334aa05acb02a755e217ef1ab6dea4d51b58b7846588b747edec04efba" dependencies = [ "anyhow", - "itertools 0.10.0", + "itertools", "proc-macro2", "quote", "syn", @@ -6759,9 +6837,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "38bc8cc6a5f2e3655e0899c1b848643b2562f853f114bfec7be120678e3ace05" dependencies = [ "proc-macro2", ] @@ -7011,7 +7089,7 @@ checksum = "9ab346ac5921dc62ffa9f89b7a773907511cdfa5490c572ae9be1be33e8afa4a" dependencies = [ "crossbeam-channel", "crossbeam-deque", - "crossbeam-utils 0.8.3", + "crossbeam-utils 0.8.5", "lazy_static", "num_cpus", ] @@ -7078,20 +7156,18 @@ checksum = "571f7f397d61c4755285cd37853fe8e03271c243424a907415909379659381c5" dependencies = [ "log 0.4.14", "rustc-hash", - "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", ] [[package]] name = "regex" -version = "1.4.3" +version = "1.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9251239e129e16308e70d853559389de218ac275b515068abc96829d05b948a" +checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" dependencies = [ "aho-corasick", "memchr", "regex-syntax", - "thread_local", ] [[package]] @@ -7106,9 +7182,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.22" +version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5eb417147ba9860a96cfe72a0b93bf88fee1744b5636ec99ab20c1aa9376581" +checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" [[package]] name = "region" @@ -7162,9 +7238,9 @@ dependencies = [ [[package]] name = "retain_mut" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9c17925a9027d298a4603d286befe3f9dc0e8ed02523141914eb628798d6e5b" +checksum = "448296241d034b96c11173591deaa1302f2c17b56092106c1f92c1bc0183a8c9" [[package]] name = "ring" @@ -7852,7 +7928,6 @@ dependencies = [ "sc-allocator", "sc-executor-common", "sc-runtime-test", - "scoped-tls", "sp-core", "sp-io", "sp-runtime-interface", @@ -8005,7 +8080,7 @@ dependencies = [ "lru", "parity-scale-codec", "parking_lot 0.11.1", - "pin-project 1.0.5", + 
"pin-project 1.0.8", "prost", "prost-build", "quickcheck", @@ -8017,7 +8092,7 @@ dependencies = [ "sc-utils", "serde", "serde_json", - "smallvec 1.6.1", + "smallvec 1.7.0", "sp-arithmetic", "sp-blockchain", "sp-consensus", @@ -8244,7 +8319,7 @@ dependencies = [ "parity-scale-codec", "parity-util-mem", "parking_lot 0.11.1", - "pin-project 1.0.5", + "pin-project 1.0.8", "rand 0.7.3", "sc-block-builder", "sc-chain-spec", @@ -8300,6 +8375,7 @@ version = "2.0.0" dependencies = [ "fdlimit", "futures 0.3.16", + "hex", "hex-literal", "log 0.4.14", "parity-scale-codec", @@ -8373,7 +8449,7 @@ dependencies = [ "libp2p", "log 0.4.14", "parking_lot 0.11.1", - "pin-project 1.0.5", + "pin-project 1.0.8", "rand 0.7.3", "serde", "serde_json", @@ -8387,6 +8463,8 @@ version = "4.0.0-dev" dependencies = [ "ansi_term 0.12.1", "atty", + "chrono", + "criterion", "lazy_static", "log 0.4.14", "once_cell", @@ -8554,9 +8632,9 @@ dependencies = [ [[package]] name = "secrecy" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0673d6a6449f5e7d12a1caf424fd9363e2af3a4953023ed455e3c4beef4597c0" +checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ "zeroize", ] @@ -8629,9 +8707,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.126" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec7505abeacaec74ae4778d9d9328fe5a5d04253220a85c4ee022239fc996d03" +checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" dependencies = [ "serde_derive", ] @@ -8657,9 +8735,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.126" +version = "1.0.130" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "963a7dbc9895aeac7ac90e74f34a5d5261828f79df35cbed41e10189d3804d43" +checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" dependencies = [ "proc-macro2", "quote", 
@@ -8722,13 +8800,13 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.3" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa827a14b29ab7f44778d14a88d3cb76e949c45083f7dbfa507d0cb699dc12de" +checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" dependencies = [ "block-buffer 0.9.0", "cfg-if 1.0.0", - "cpuid-bool", + "cpufeatures 0.2.1", "digest 0.9.0", "opaque-debug 0.3.0", ] @@ -8803,15 +8881,6 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" -[[package]] -name = "slog" -version = "2.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8347046d4ebd943127157b94d63abb990fcf729dc4e9978927fdf4ac3c998d06" -dependencies = [ - "erased-serde", -] - [[package]] name = "smallvec" version = "0.6.14" @@ -8823,9 +8892,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" [[package]] name = "snap" @@ -8846,7 +8915,7 @@ dependencies = [ "rand_core 0.6.2", "ring", "rustc_version 0.3.3", - "sha2 0.9.3", + "sha2 0.9.8", "subtle 2.4.0", "x25519-dalek", ] @@ -9176,13 +9245,14 @@ dependencies = [ "secrecy", "serde", "serde_json", - "sha2 0.9.3", + "sha2 0.9.8", "sp-debug-derive", "sp-externalities", "sp-runtime-interface", "sp-serializer", "sp-std", "sp-storage", + "ss58-registry", "substrate-bip39", "thiserror", "tiny-bip39", @@ -9263,7 +9333,6 @@ dependencies = [ "sp-core", "sp-externalities", "sp-keystore", - "sp-maybe-compressed-blob", "sp-runtime-interface", "sp-state-machine", "sp-std", @@ -9532,9 +9601,9 @@ dependencies = [ "num-traits", "parity-scale-codec", "parking_lot 0.11.1", - 
"pretty_assertions 0.6.1", + "pretty_assertions", "rand 0.7.3", - "smallvec 1.6.1", + "smallvec 1.7.0", "sp-core", "sp-externalities", "sp-panic-handler", @@ -9607,13 +9676,7 @@ dependencies = [ name = "sp-tracing" version = "4.0.0-dev" dependencies = [ - "erased-serde", - "log 0.4.14", "parity-scale-codec", - "parking_lot 0.10.2", - "serde", - "serde_json", - "slog", "sp-std", "tracing", "tracing-core", @@ -9704,6 +9767,20 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "ss58-registry" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef2413ecc7946ca99368862851dc1359f1477bc654ecfb135cf3efcb85ceca5f" +dependencies = [ + "Inflector", + "proc-macro2", + "quote", + "serde", + "serde_json", + "unicode-xid", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -9779,6 +9856,9 @@ name = "strum" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aaf86bbcfd1fa9670b7a129f64fc0c9fcbbfe4f1bc4210e9e98fe71ffc12cde2" +dependencies = [ + "strum_macros 0.21.1", +] [[package]] name = "strum_macros" @@ -10061,9 +10141,9 @@ checksum = "1e81da0851ada1f3e9d4312c704aa4f8806f0f9d69faaf8df2f3464b4a9437c2" [[package]] name = "syn" -version = "1.0.76" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6f107db402c2c2055242dbf4d2af0e69197202e9faacbef9571bbe47f5a1b84" +checksum = "d010a1623fbd906d51d650a9916aaefc05ffa0e4053ff7fe601167f3e715d194" dependencies = [ "proc-macro2", "quote", @@ -10260,9 +10340,9 @@ dependencies = [ [[package]] name = "tiny-bip39" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9e44c4759bae7f1032e286a7ef990bd9ed23fe831b7eeba0beb97484c2e59b8" +checksum = 
"ffc59cb9dfc85bb312c3a78fd6aa8a8582e310b0fa885d5bb877f6dcc601839d" dependencies = [ "anyhow", "hmac 0.8.1", @@ -10270,9 +10350,10 @@ dependencies = [ "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.9.3", + "sha2 0.9.8", "thiserror", "unicode-normalization", + "wasm-bindgen", "zeroize", ] @@ -10312,9 +10393,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cf844b23c6131f624accf65ce0e4e9956a8bb329400ea5bcc26ae3a5c20b0b" +checksum = "c2c2416fdedca8443ae44b4527de1ea633af61d8f7169ffa6e72c5b53d24efcc" dependencies = [ "autocfg 1.0.1", "bytes 1.0.1", @@ -10491,9 +10572,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.25" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01ebdc2bb4498ab1ab5f5b73c5803825e60199229ccba0698170e3be0e7f959f" +checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if 1.0.0", "pin-project-lite 0.2.6", @@ -10503,9 +10584,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.15" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" +checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ "proc-macro2", "quote", @@ -10514,9 +10595,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.17" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f" +checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" dependencies = [ "lazy_static", ] @@ -10527,7 +10608,7 @@ version = "0.2.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.0.5", + "pin-project 1.0.8", "tracing", ] @@ -10566,7 +10647,7 @@ dependencies = [ "serde", "serde_json", "sharded-slab", - "smallvec 1.6.1", + "smallvec 1.7.0", "thread_local", "tracing", "tracing-core", @@ -10612,7 +10693,7 @@ dependencies = [ "hashbrown 0.11.2", "log 0.4.14", "rustc-hex", - "smallvec 1.6.1", + "smallvec 1.7.0", ] [[package]] @@ -10652,7 +10733,7 @@ dependencies = [ "lazy_static", "log 0.4.14", "rand 0.8.4", - "smallvec 1.6.1", + "smallvec 1.7.0", "thiserror", "tinyvec", "url 2.2.1", @@ -10672,7 +10753,7 @@ dependencies = [ "lru-cache", "parking_lot 0.11.1", "resolv-conf", - "smallvec 1.6.1", + "smallvec 1.7.0", "thiserror", "trust-dns-proto", ] @@ -10723,12 +10804,12 @@ dependencies = [ [[package]] name = "twox-hash" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f8ab788026715fa63b31960869617cba39117e520eb415b0139543e325ab59" +checksum = "1f559b464de2e2bdabcac6a210d12e9b5a5973c251e102c44c585c71d51bd78e" dependencies = [ - "cfg-if 0.1.10", - "rand 0.7.3", + "cfg-if 1.0.0", + "rand 0.8.4", "static_assertions", ] @@ -10980,9 +11061,9 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.73" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83240549659d187488f91f33c0f8547cbfef0b2088bc470c116d1d260ef623d9" +checksum = "5e68338db6becec24d3c7977b5bf8a48be992c934b5d07177e3931f5dc9b076c" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -10990,9 +11071,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.73" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ae70622411ca953215ca6d06d3ebeb1e915f0f6613e3b495122878d7ebec7dae" +checksum = "f34c405b4f0658583dba0c1c7c9b694f3cac32655db463b56c254a1c75269523" dependencies = [ "bumpalo", "lazy_static", @@ -11017,9 +11098,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.73" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e734d91443f177bfdb41969de821e15c516931c3c3db3d318fa1b68975d0f6f" +checksum = "b9d5a6580be83b19dc570a8f9c324251687ab2184e57086f71625feb57ec77c8" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -11027,9 +11108,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.73" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53739ff08c8a68b0fdbcd54c372b8ab800b1449ab3c9d706503bc7dd1621b2c" +checksum = "e3775a030dc6f5a0afd8a84981a21cc92a781eb429acef9ecce476d0c9113e92" dependencies = [ "proc-macro2", "quote", @@ -11040,9 +11121,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.73" +version = "0.2.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9a543ae66aa233d14bb765ed9af4a33e81b8b58d1584cf1b47ff8cd0b9e4489" +checksum = "c279e376c7a8e8752a8f1eaa35b7b0bee6bb9fb0cdacfa97cc3f1f289c87e2b4" [[package]] name = "wasm-gc-api" @@ -11102,7 +11183,7 @@ dependencies = [ "enumset", "serde", "serde_bytes", - "smallvec 1.6.1", + "smallvec 1.7.0", "target-lexicon 0.11.2", "thiserror", "wasmer-types", @@ -11122,7 +11203,7 @@ dependencies = [ "more-asserts", "rayon", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "tracing", "wasmer-compiler", "wasmer-types", @@ -11142,7 +11223,7 @@ dependencies = [ "more-asserts", "rayon", "serde", - "smallvec 1.6.1", + "smallvec 1.7.0", "wasmer-compiler", "wasmer-types", "wasmer-vm", @@ -11297,15 +11378,15 @@ checksum = "87cc2fe6350834b4e528ba0901e7aa405d78b89dc1fa3145359eb4de0e323fcf" [[package]] name = "wasmparser" -version 
= "0.78.2" +version = "0.80.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52144d4c78e5cf8b055ceab8e5fa22814ce4315d6002ad32cfd914f37c12fd65" +checksum = "449167e2832691a1bff24cde28d2804e90e09586a448c8e76984792c44334a6b" [[package]] name = "wasmtime" -version = "0.27.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b310b9d20fcf59385761d1ade7a3ef06aecc380e3d3172035b919eaf7465d9f7" +checksum = "899b1e5261e3d3420860dacfb952871ace9d7ba9f953b314f67aaf9f8e2a4d89" dependencies = [ "anyhow", "backtrace", @@ -11316,27 +11397,28 @@ dependencies = [ "lazy_static", "libc", "log 0.4.14", + "object 0.26.0", "paste 1.0.4", "psm", + "rayon", "region", "rustc-demangle", "serde", - "smallvec 1.6.1", "target-lexicon 0.12.0", - "wasmparser 0.78.2", + "wasmparser 0.80.2", "wasmtime-cache", + "wasmtime-cranelift", "wasmtime-environ", "wasmtime-jit", - "wasmtime-profiling", "wasmtime-runtime", "winapi 0.3.9", ] [[package]] name = "wasmtime-cache" -version = "0.27.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d14d500d5c3dc5f5c097158feee123d64b3097f0d836a2a27dff9c761c73c843" +checksum = "e2493b81d7a9935f7af15e06beec806f256bc974a90a843685f3d61f2fc97058" dependencies = [ "anyhow", "base64 0.13.0", @@ -11347,7 +11429,7 @@ dependencies = [ "libc", "log 0.4.14", "serde", - "sha2 0.9.3", + "sha2 0.9.8", "toml", "winapi 0.3.9", "zstd", @@ -11355,122 +11437,76 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c525b39f062eada7db3c1298287b96dcb6e472b9f6b22501300b28d9fa7582f6" -dependencies = [ - "cranelift-codegen 0.74.0", - "cranelift-entity 0.74.0", - "cranelift-frontend 0.74.0", - "cranelift-wasm", - "target-lexicon 0.12.0", - "wasmparser 0.78.2", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-debug" -version = "0.27.0" +version = "0.30.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d2a763e7a6fc734218e0e463196762a4f409c483063d81e0e85f96343b2e0a" +checksum = "99706bacdf5143f7f967d417f0437cce83a724cf4518cb1a3ff40e519d793021" dependencies = [ "anyhow", - "gimli 0.24.0", + "cranelift-codegen 0.77.0", + "cranelift-entity 0.77.0", + "cranelift-frontend 0.77.0", + "cranelift-native", + "cranelift-wasm", + "gimli 0.25.0", "more-asserts", - "object 0.24.0", + "object 0.26.0", "target-lexicon 0.12.0", "thiserror", - "wasmparser 0.78.2", + "wasmparser 0.80.2", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "0.27.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f64d0c2d881c31b0d65c1f2695e022d71eb60b9fbdd336aacca28208b58eac90" +checksum = "ac42cb562a2f98163857605f02581d719a410c5abe93606128c59a10e84de85b" dependencies = [ + "anyhow", "cfg-if 1.0.0", - "cranelift-codegen 0.74.0", - "cranelift-entity 0.74.0", - "cranelift-wasm", - "gimli 0.24.0", + "cranelift-entity 0.77.0", + "gimli 0.25.0", "indexmap", "log 0.4.14", "more-asserts", + "object 0.26.0", "serde", + "target-lexicon 0.12.0", "thiserror", - "wasmparser 0.78.2", + "wasmparser 0.80.2", + "wasmtime-types", ] [[package]] name = "wasmtime-jit" -version = "0.27.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4539ea734422b7c868107e2187d7746d8affbcaa71916d72639f53757ad707" +checksum = "24f46dd757225f29a419be415ea6fb8558df9b0194f07e3a6a9c99d0e14dd534" dependencies = [ - "addr2line 0.15.1", + "addr2line", "anyhow", + "bincode", "cfg-if 1.0.0", - "cranelift-codegen 0.74.0", - "cranelift-entity 0.74.0", - "cranelift-frontend 0.74.0", - "cranelift-native", - "cranelift-wasm", - "gimli 0.24.0", + "gimli 0.25.0", + "libc", "log 0.4.14", "more-asserts", - "object 0.24.0", - "rayon", + "object 0.26.0", "region", "serde", "target-lexicon 0.12.0", "thiserror", - "wasmparser 0.78.2", - "wasmtime-cranelift", - 
"wasmtime-debug", + "wasmparser 0.80.2", "wasmtime-environ", - "wasmtime-obj", - "wasmtime-profiling", "wasmtime-runtime", "winapi 0.3.9", ] -[[package]] -name = "wasmtime-obj" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e1a8ff85246d091828e2225af521a6208ed28c997bb5c39eb697366dc2e2f2b" -dependencies = [ - "anyhow", - "more-asserts", - "object 0.24.0", - "target-lexicon 0.12.0", - "wasmtime-debug", - "wasmtime-environ", -] - -[[package]] -name = "wasmtime-profiling" -version = "0.27.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24364d522dcd67c897c8fffc42e5bdfc57207bbb6d7eeade0da9d4a7d70105b" -dependencies = [ - "anyhow", - "cfg-if 1.0.0", - "lazy_static", - "libc", - "serde", - "target-lexicon 0.12.0", - "wasmtime-environ", - "wasmtime-runtime", -] - [[package]] name = "wasmtime-runtime" -version = "0.27.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51e57976e8a19a18a18e002c6eb12e5769554204238e47ff155fda1809ef0f7" +checksum = "0122215a44923f395487048cb0a1d60b5b32c73aab15cf9364b798dbaff0996f" dependencies = [ "anyhow", "backtrace", @@ -11490,6 +11526,18 @@ dependencies = [ "winapi 0.3.9", ] +[[package]] +name = "wasmtime-types" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9b01caf8a204ef634ebac99700e77ba716d3ebbb68a1abbc2ceb6b16dbec9e4" +dependencies = [ + "cranelift-entity 0.77.0", + "serde", + "thiserror", + "wasmparser 0.80.2", +] + [[package]] name = "wast" version = "38.0.0" @@ -11510,9 +11558,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.47" +version = "0.3.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c40dc691fc48003eba817c38da7113c15698142da971298003cac3ef175680b3" +checksum = "0a84d70d1ec7d2da2d26a5bd78f4bca1b8c3254805363ce743b7a05bc30d195a" dependencies = [ "js-sys", "wasm-bindgen", @@ -11698,18 
+11746,18 @@ dependencies = [ [[package]] name = "zeroize" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377db0846015f7ae377174787dd452e1c5f5a9050bc6f954911d01f116daa0cd" +checksum = "bf68b08513768deaa790264a7fac27a58cbf2705cfcdc9448362229217d7e970" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.0.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3f369ddb18862aba61aa49bf31e74d29f0f162dec753063200e1dc084345d16" +checksum = "bdff2024a851a322b08f179173ae2ba620445aef1e838f0c196820eade4ae0c7" dependencies = [ "proc-macro2", "quote", @@ -11719,18 +11767,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.6.1+zstd.1.4.9" +version = "0.9.0+zstd.1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de55e77f798f205d8561b8fe2ef57abfb6e0ff2abe7fd3c089e119cdb5631a3" +checksum = "07749a5dc2cb6b36661290245e350f15ec3bbb304e493db54a1d354480522ccd" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "3.0.1+zstd.1.4.9" +version = "4.1.1+zstd.1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1387cabcd938127b30ce78c4bf00b30387dddf704e3f0881dbc4ff62b5566f8c" +checksum = "c91c90f2c593b003603e5e0493c837088df4469da25aafff8bce42ba48caf079" dependencies = [ "libc", "zstd-sys", @@ -11738,9 +11786,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.20+zstd.1.4.9" +version = "1.6.1+zstd.1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebd5b733d7cf2d9447e2c3e76a5589b4f5e5ae065c22a2bc0b023cbc331b6c8e" +checksum = "615120c7a2431d16cf1cf979e7fc31ba7a5b5e5707b29c8a99e5dbf8a8392a33" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index e110c27b20d77..71473a4bc5689 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,6 +19,8 @@ members = [ "client/api", 
"client/authority-discovery", "client/basic-authorship", + "client/beefy", + "client/beefy/rpc", "client/block-builder", "client/chain-spec", "client/chain-spec/derive", @@ -69,6 +71,9 @@ members = [ "frame/authorship", "frame/babe", "frame/balances", + "frame/beefy", + "frame/beefy-mmr", + "frame/beefy-mmr/primitives", "frame/benchmarking", "frame/bounties", "frame/collective", @@ -138,6 +143,7 @@ members = [ "primitives/arithmetic/fuzzer", "primitives/authority-discovery", "primitives/authorship", + "primitives/beefy", "primitives/block-builder", "primitives/blockchain", "primitives/consensus/aura", diff --git a/README.md b/README.md index 6288540548a0d..b716794428a00 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,9 @@ Substrate is a next-generation framework for blockchain innovation 🚀. ## Trying it out -Simply go to [substrate.dev](https://substrate.dev) and follow the -[installation](https://substrate.dev/docs/en/knowledgebase/getting-started/) instructions. You can -also try out one of the [tutorials](https://substrate.dev/en/tutorials). +Simply go to [docs.substrate.io](https://docs.substrate.io) and follow the +[installation](https://docs.substrate.io/v3/getting-started/overview) instructions. You can +also try out one of the [tutorials](https://docs.substrate.io/tutorials/). ## Contributions & Code of Conduct @@ -28,3 +28,4 @@ The security policy and procedures can be found in [`docs/SECURITY.md`](docs/SEC The reason for the split-licensing is to ensure that for the vast majority of teams using Substrate to create feature-chains, then all changes can be made entirely in Apache2-licensed code, allowing teams full freedom over what and how they release and giving licensing clarity to commercial teams. In the interests of the community, we require any deeper improvements made to Substrate's core logic (e.g. Substrate's internal consensus, crypto or database code) to be contributed back so everyone can benefit. 
+ diff --git a/bin/node-template/README.md b/bin/node-template/README.md index cd977fac84493..2397c57363b77 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -1,14 +1,25 @@ # Substrate Node Template +[![Try on playground](https://img.shields.io/badge/Playground-Node_Template-brightgreen?logo=Parity%20Substrate)](https://docs.substrate.io/playground/) [![Matrix](https://img.shields.io/matrix/substrate-technical:matrix.org)](https://matrix.to/#/#substrate-technical:matrix.org) + A fresh FRAME-based [Substrate](https://www.substrate.io/) node, ready for hacking :rocket: ## Getting Started -Follow these steps to get started with the Node Template :hammer_and_wrench: +Follow the steps below to get started with the Node Template, or get it up and running right from +your browser in just a few clicks using +the [Substrate Playground](https://docs.substrate.io/playground/) :hammer_and_wrench: + +### Using Nix + +Install [nix](https://nixos.org/) and optionally [direnv](https://github.com/direnv/direnv) and +[lorri](https://github.com/target/lorri) for a fully plug and play experience for setting up the +development environment. To get all the correct dependencies activate direnv `direnv allow` and +lorri `lorri shell`. ### Rust Setup -First, complete the [basic Rust setup instructions](./doc/rust-setup.md). +First, complete the [basic Rust setup instructions](./docs/rust-setup.md). ### Run @@ -62,10 +73,17 @@ Start the development chain with detailed logging: RUST_BACKTRACE=1 ./target/release/node-template -ldebug --dev ``` +### Connect with Polkadot-JS Apps Front-end + +Once the node template is running locally, you can connect it with **Polkadot-JS Apps** front-end +to interact with your chain. [Click +here](https://polkadot.js.org/apps/#/explorer?rpc=ws://localhost:9944) connecting the Apps to your +local node template. 
+ ### Multi-Node Local Testnet -If you want to see the multi-node consensus algorithm in action, refer to -[our Start a Private Network tutorial](https://substrate.dev/docs/en/tutorials/start-a-private-network/). +If you want to see the multi-node consensus algorithm in action, refer to our +[Start a Private Network tutorial](https://docs.substrate.io/tutorials/v3/private-network). ## Template Structure @@ -77,34 +95,34 @@ directories. A blockchain node is an application that allows users to participate in a blockchain network. Substrate-based blockchain nodes expose a number of capabilities: -- Networking: Substrate nodes use the [`libp2p`](https://libp2p.io/) networking stack to allow the - nodes in the network to communicate with one another. -- Consensus: Blockchains must have a way to come to - [consensus](https://substrate.dev/docs/en/knowledgebase/advanced/consensus) on the state of the - network. Substrate makes it possible to supply custom consensus engines and also ships with - several consensus mechanisms that have been built on top of - [Web3 Foundation research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html). -- RPC Server: A remote procedure call (RPC) server is used to interact with Substrate nodes. +- Networking: Substrate nodes use the [`libp2p`](https://libp2p.io/) networking stack to allow the + nodes in the network to communicate with one another. +- Consensus: Blockchains must have a way to come to + [consensus](https://docs.substrate.io/v3/advanced/consensus) on the state of the + network. Substrate makes it possible to supply custom consensus engines and also ships with + several consensus mechanisms that have been built on top of + [Web3 Foundation research](https://research.web3.foundation/en/latest/polkadot/NPoS/index.html). +- RPC Server: A remote procedure call (RPC) server is used to interact with Substrate nodes. 
There are several files in the `node` directory - take special note of the following: -- [`chain_spec.rs`](./node/src/chain_spec.rs): A - [chain specification](https://substrate.dev/docs/en/knowledgebase/integrate/chain-spec) is a - source code file that defines a Substrate chain's initial (genesis) state. Chain specifications - are useful for development and testing, and critical when architecting the launch of a - production chain. Take note of the `development_config` and `testnet_genesis` functions, which - are used to define the genesis state for the local development chain configuration. These - functions identify some - [well-known accounts](https://substrate.dev/docs/en/knowledgebase/integrate/subkey#well-known-keys) - and use them to configure the blockchain's initial state. -- [`service.rs`](./node/src/service.rs): This file defines the node implementation. Take note of - the libraries that this file imports and the names of the functions it invokes. In particular, - there are references to consensus-related topics, such as the - [longest chain rule](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#longest-chain-rule), - the [Aura](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#aura) block authoring - mechanism and the - [GRANDPA](https://substrate.dev/docs/en/knowledgebase/advanced/consensus#grandpa) finality - gadget. +- [`chain_spec.rs`](./node/src/chain_spec.rs): A + [chain specification](https://docs.substrate.io/v3/runtime/chain-specs) is a + source code file that defines a Substrate chain's initial (genesis) state. Chain specifications + are useful for development and testing, and critical when architecting the launch of a + production chain. Take note of the `development_config` and `testnet_genesis` functions, which + are used to define the genesis state for the local development chain configuration. 
These + functions identify some + [well-known accounts](https://docs.substrate.io/v3/tools/subkey#well-known-keys) + and use them to configure the blockchain's initial state. +- [`service.rs`](./node/src/service.rs): This file defines the node implementation. Take note of + the libraries that this file imports and the names of the functions it invokes. In particular, + there are references to consensus-related topics, such as the + [longest chain rule](https://docs.substrate.io/v3/advanced/consensus#longest-chain-rule), + the [Aura](https://docs.substrate.io/v3/advanced/consensus#aura) block authoring + mechanism and the + [GRANDPA](https://docs.substrate.io/v3/advanced/consensus#grandpa) finality + gadget. After the node has been [built](#build), refer to the embedded documentation to learn more about the capabilities and configuration parameters that it exposes: @@ -116,27 +134,27 @@ capabilities and configuration parameters that it exposes: ### Runtime In Substrate, the terms -"[runtime](https://substrate.dev/docs/en/knowledgebase/getting-started/glossary#runtime)" and -"[state transition function](https://substrate.dev/docs/en/knowledgebase/getting-started/glossary#stf-state-transition-function)" +"[runtime](https://docs.substrate.io/v3/getting-started/glossary#runtime)" and +"[state transition function](https://docs.substrate.io/v3/getting-started/glossary#state-transition-function-stf)" are analogous - they refer to the core logic of the blockchain that is responsible for validating blocks and executing the state changes they define. The Substrate project in this repository uses -the [FRAME](https://substrate.dev/docs/en/knowledgebase/runtime/frame) framework to construct a +the [FRAME](https://docs.substrate.io/v3/runtime/frame) framework to construct a blockchain runtime. FRAME allows runtime developers to declare domain-specific logic in modules called "pallets". 
At the heart of FRAME is a helpful -[macro language](https://substrate.dev/docs/en/knowledgebase/runtime/macros) that makes it easy to +[macro language](https://docs.substrate.io/v3/runtime/macros) that makes it easy to create pallets and flexibly compose them to create blockchains that can address [a variety of needs](https://www.substrate.io/substrate-users/). Review the [FRAME runtime implementation](./runtime/src/lib.rs) included in this template and note the following: -- This file configures several pallets to include in the runtime. Each pallet configuration is - defined by a code block that begins with `impl $PALLET_NAME::Config for Runtime`. -- The pallets are composed into a single runtime by way of the - [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) - macro, which is part of the core - [FRAME Support](https://substrate.dev/docs/en/knowledgebase/runtime/frame#support-library) - library. +- This file configures several pallets to include in the runtime. Each pallet configuration is + defined by a code block that begins with `impl $PALLET_NAME::Config for Runtime`. +- The pallets are composed into a single runtime by way of the + [`construct_runtime!`](https://crates.parity.io/frame_support/macro.construct_runtime.html) + macro, which is part of the core + [FRAME Support](https://docs.substrate.io/v3/runtime/frame#support-crate) + library. ### Pallets @@ -146,17 +164,17 @@ template pallet that is [defined in the `pallets`](./pallets/template/src/lib.rs A FRAME pallet is compromised of a number of blockchain primitives: -- Storage: FRAME defines a rich set of powerful - [storage abstractions](https://substrate.dev/docs/en/knowledgebase/runtime/storage) that makes - it easy to use Substrate's efficient key-value database to manage the evolving state of a - blockchain. 
-- Dispatchables: FRAME pallets define special types of functions that can be invoked (dispatched) - from outside of the runtime in order to update its state. -- Events: Substrate uses [events](https://substrate.dev/docs/en/knowledgebase/runtime/events) to - notify users of important changes in the runtime. -- Errors: When a dispatchable fails, it returns an error. -- Config: The `Config` configuration interface is used to define the types and parameters upon - which a FRAME pallet depends. +- Storage: FRAME defines a rich set of powerful + [storage abstractions](https://docs.substrate.io/v3/runtime/storage) that makes + it easy to use Substrate's efficient key-value database to manage the evolving state of a + blockchain. +- Dispatchables: FRAME pallets define special types of functions that can be invoked (dispatched) + from outside of the runtime in order to update its state. +- Events: Substrate uses [events and errors](https://docs.substrate.io/v3/runtime/events-and-errors) + to notify users of important changes in the runtime. +- Errors: When a dispatchable fails, it returns an error. +- Config: The `Config` configuration interface is used to define the types and parameters upon + which a FRAME pallet depends. ### Run in Docker @@ -170,7 +188,8 @@ Then run the following command to start a single node development chain. ``` This command will firstly compile your code, and then start a local development network. You can -also replace the default command (`cargo build --release && ./target/release/node-template --dev --ws-external`) +also replace the default command +(`cargo build --release && ./target/release/node-template --dev --ws-external`) by appending your own. A few useful ones are as follow. 
```bash diff --git a/bin/node-template/docs/rust-setup.md b/bin/node-template/docs/rust-setup.md index 34f6e43e7f0dd..4b96da1146b8e 100644 --- a/bin/node-template/docs/rust-setup.md +++ b/bin/node-template/docs/rust-setup.md @@ -12,8 +12,9 @@ commands for Rust's toolchains will be the same for all supported, Unix-based op ## Unix-Based Operating Systems Substrate development is easiest on Unix-based operating systems like macOS or Linux. The examples -in the Substrate [Tutorials](https://substrate.dev/tutorials) and [Recipes](https://substrate.dev/recipes/) -use Unix-style terminals to demonstrate how to interact with Substrate from the command line. +in the Substrate [Tutorials](https://docs.substrate.io/tutorials/v3) and +[How-to Guides](https://docs.substrate.io/how-to-guides/v3) use Unix-style terminals to demonstrate +how to interact with Substrate from the command line. ### macOS diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 04d70b338ac04..57d9c5f3f71e0 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -1,14 +1,14 @@ [package] name = "node-template" version = "3.0.0" -authors = ["Substrate DevHub "] description = "A fresh FRAME-based Substrate node, ready for hacking." +authors = ["Substrate DevHub "] +homepage = "https://substrate.io/" edition = "2018" license = "Unlicense" -build = "build.rs" -homepage = "https://substrate.dev" -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" publish = false +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" +build = "build.rs" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node-template/node/src/cli.rs b/bin/node-template/node/src/cli.rs index 8b551051c1b19..8ed1d35ba5f92 100644 --- a/bin/node-template/node/src/cli.rs +++ b/bin/node-template/node/src/cli.rs @@ -35,7 +35,7 @@ pub enum Subcommand { /// Revert the chain to a previous state. 
Revert(sc_cli::RevertCmd), - /// The custom benchmark subcommmand benchmarking runtime pallets. + /// The custom benchmark subcommand benchmarking runtime pallets. #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] Benchmark(frame_benchmarking_cli::BenchmarkCmd), } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 0f09ef436acad..2286ad3bd654f 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -16,7 +16,12 @@ use std::{sync::Arc, time::Duration}; pub struct ExecutorDispatch; impl sc_executor::NativeExecutionDispatch for ExecutorDispatch { + /// Only enable the benchmarking host functions when we actually want to benchmark. + #[cfg(feature = "runtime-benchmarks")] type ExtendHostFunctions = frame_benchmarking::benchmarking::HostFunctions; + /// Otherwise we only use the default Substrate host functions. + #[cfg(not(feature = "runtime-benchmarks"))] + type ExtendHostFunctions = (); fn dispatch(method: &str, data: &[u8]) -> Option> { node_template_runtime::api::dispatch(method, data) @@ -179,6 +184,7 @@ pub fn new_full(mut config: Configuration) -> Result let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( backend.clone(), grandpa_link.shared_authority_set().clone(), + Vec::default(), )); let (network, system_rpc_tx, network_starter) = @@ -409,6 +415,7 @@ pub fn new_light(mut config: Configuration) -> Result let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( backend.clone(), grandpa_link.shared_authority_set().clone(), + Vec::default(), )); let (network, system_rpc_tx, network_starter) = diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index b3eb747625b4f..7ea5628b97c1e 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -1,14 +1,13 @@ [package] -authors = ['Substrate DevHub '] 
-edition = '2018' name = 'pallet-template' version = "3.0.0" -license = "Unlicense" -homepage = "https://substrate.dev" -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" description = "FRAME pallet template for defining custom runtime logic." -readme = "README.md" +authors = ['Substrate DevHub '] +homepage = "https://substrate.io/" +edition = '2018' +license = "Unlicense" publish = false +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node-template/pallets/template/src/benchmarking.rs b/bin/node-template/pallets/template/src/benchmarking.rs index 2117c048cfbdb..d496a9fc89b1a 100644 --- a/bin/node-template/pallets/template/src/benchmarking.rs +++ b/bin/node-template/pallets/template/src/benchmarking.rs @@ -4,7 +4,7 @@ use super::*; #[allow(unused)] use crate::Pallet as Template; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_system::RawOrigin; benchmarks! { @@ -15,6 +15,6 @@ benchmarks! { verify { assert_eq!(Something::::get(), Some(s)); } -} -impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Template, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index ee3ca695b64da..18599168f1a63 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -2,7 +2,7 @@ /// Edit this file to define custom logic or remove it if it is not needed. /// Learn more about FRAME and the core library of Substrate FRAME pallets: -/// +/// pub use pallet::*; #[cfg(test)] @@ -31,15 +31,15 @@ pub mod pallet { pub struct Pallet(_); // The pallet's runtime storage items. 
- // https://substrate.dev/docs/en/knowledgebase/runtime/storage + // https://docs.substrate.io/v3/runtime/storage #[pallet::storage] #[pallet::getter(fn something)] // Learn more about declaring storage items: - // https://substrate.dev/docs/en/knowledgebase/runtime/storage#declaring-storage-items + // https://docs.substrate.io/v3/runtime/storage#declaring-storage-items pub type Something = StorageValue<_, u32>; // Pallets use events to inform users when important changes are made. - // https://substrate.dev/docs/en/knowledgebase/runtime/events + // https://docs.substrate.io/v3/runtime/events-and-errors #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { @@ -68,7 +68,7 @@ pub mod pallet { pub fn do_something(origin: OriginFor, something: u32) -> DispatchResult { // Check that the extrinsic was signed and get the signer. // This function will return an error if the extrinsic is not signed. - // https://substrate.dev/docs/en/knowledgebase/runtime/origin + // https://docs.substrate.io/v3/runtime/origins let who = ensure_signed(origin)?; // Update storage. diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 47e67af2b9ae1..e9f557f3fb5a4 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -1,12 +1,13 @@ [package] name = "node-template-runtime" version = "3.0.0" +description = 'A fresh FRAME-based Substrate runtime, ready for hacking.' 
authors = ["Substrate DevHub "] +homepage = "https://substrate.io/" edition = "2018" license = "Unlicense" -homepage = "https://substrate.dev" -repository = "https://github.com/substrate-developer-hub/substrate-node-template/" publish = false +repository = "https://github.com/substrate-developer-hub/substrate-node-template/" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -44,7 +45,7 @@ pallet-transaction-payment-rpc-runtime-api = { version = "4.0.0-dev", default-fe # Used for runtime benchmarking frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/benchmarking", optional = true } frame-system-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../../frame/system/benchmarking", optional = true } -hex-literal = { version = "0.3.1", optional = true } +hex-literal = { version = "0.3.3", optional = true } pallet-template = { version = "3.0.0", default-features = false, path = "../pallets/template" } diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index eecc93e166666..4b49cb48ef352 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -87,7 +87,7 @@ pub mod opaque { } // To learn more about runtime versioning and what each of the following value means: -// https://substrate.dev/docs/en/knowledgebase/runtime/upgrades#runtime-versioning +// https://docs.substrate.io/v3/runtime/upgrades#runtime-versioning #[sp_version::runtime_version] pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node-template"), @@ -222,6 +222,7 @@ impl pallet_grandpa::Config for Runtime { type HandleEquivocation = (); type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; } parameter_types! { @@ -257,11 +258,13 @@ impl pallet_balances::Config for Runtime { parameter_types! 
{ pub const TransactionByteFee: Balance = 1; + pub OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } @@ -505,7 +508,6 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_timestamp, Timestamp); add_benchmark!(params, batches, pallet_template, TemplateModule); - if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } Ok(batches) } } diff --git a/bin/node-template/shell.nix b/bin/node-template/shell.nix new file mode 100644 index 0000000000000..c08005c1630e9 --- /dev/null +++ b/bin/node-template/shell.nix @@ -0,0 +1,35 @@ +let + mozillaOverlay = + import (builtins.fetchGit { + url = "https://github.com/mozilla/nixpkgs-mozilla.git"; + rev = "57c8084c7ef41366993909c20491e359bbb90f54"; + }); + pinned = builtins.fetchGit { + # Descriptive name to make the store path easier to identify + url = "https://github.com/nixos/nixpkgs/"; + # Commit hash for nixos-unstable as of 2020-04-26 + # `git ls-remote https://github.com/nixos/nixpkgs nixos-unstable` + ref = "refs/heads/nixos-unstable"; + rev = "1fe6ed37fd9beb92afe90671c0c2a662a03463dd"; + }; + nixpkgs = import pinned { overlays = [ mozillaOverlay ]; }; + toolchain = with nixpkgs; (rustChannelOf { date = "2021-09-14"; channel = "nightly"; }); + rust-wasm = toolchain.rust.override { + targets = [ "wasm32-unknown-unknown" ]; + }; +in +with nixpkgs; pkgs.mkShell { + buildInputs = [ + clang + pkg-config + rust-wasm + ] ++ stdenv.lib.optionals stdenv.isDarwin [ + darwin.apple_sdk.frameworks.Security + ]; + + LIBCLANG_PATH = "${llvmPackages.libclang}/lib"; + PROTOC = "${protobuf}/bin/protoc"; + RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library/"; + ROCKSDB_LIB_DIR = "${rocksdb}/lib"; + +} diff --git 
a/bin/node/bench/src/construct.rs b/bin/node/bench/src/construct.rs index 1532e02bd3ef6..ca1a1c18f9ea9 100644 --- a/bin/node/bench/src/construct.rs +++ b/bin/node/bench/src/construct.rs @@ -30,8 +30,8 @@ use std::{borrow::Cow, collections::HashMap, pin::Pin, sync::Arc}; use node_primitives::Block; use node_testing::bench::{BenchDb, BlockType, DatabaseType, KeyTypes, Profile}; use sc_transaction_pool_api::{ - ImportNotificationStream, PoolFuture, PoolStatus, TransactionFor, TransactionSource, - TransactionStatusStreamFor, TxHash, + ImportNotificationStream, PoolFuture, PoolStatus, ReadyTransactions, TransactionFor, + TransactionSource, TransactionStatusStreamFor, TxHash, }; use sp_consensus::{Environment, Proposer}; use sp_inherents::InherentDataProvider; @@ -216,6 +216,19 @@ impl sc_transaction_pool_api::InPoolTransaction for PoolTransaction { #[derive(Clone, Debug)] pub struct Transactions(Vec>); +pub struct TransactionsIterator(std::vec::IntoIter>); + +impl Iterator for TransactionsIterator { + type Item = Arc; + + fn next(&mut self) -> Option { + self.0.next() + } +} + +impl ReadyTransactions for TransactionsIterator { + fn report_invalid(&mut self, _tx: &Self::Item) {} +} impl sc_transaction_pool_api::TransactionPool for Transactions { type Block = Block; @@ -257,16 +270,17 @@ impl sc_transaction_pool_api::TransactionPool for Transactions { _at: NumberFor, ) -> Pin< Box< - dyn Future> + Send>> - + Send, + dyn Future< + Output = Box> + Send>, + > + Send, >, > { - let iter: Box> + Send> = - Box::new(self.0.clone().into_iter()); + let iter: Box> + Send> = + Box::new(TransactionsIterator(self.0.clone().into_iter())); Box::pin(futures::future::ready(iter)) } - fn ready(&self) -> Box> + Send> { + fn ready(&self) -> Box> + Send> { unimplemented!() } diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index 5bbf1ddf3b73e..da9d0cdaf85b8 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -135,18 +135,20 @@ impl 
core::Benchmark for ImportBenchmark { .inspect_state(|| { match self.block_type { BlockType::RandomTransfersKeepAlive => { - // should be 5 per signed extrinsic + 1 per unsigned + // should be 7 per signed extrinsic + 1 per unsigned // we have 1 unsigned and the rest are signed in the block - // those 5 events per signed are: - // - new account (RawEvent::NewAccount) as we always transfer fund to - // non-existant account - // - endowed (RawEvent::Endowed) for this new account - // - successful transfer (RawEvent::Transfer) for this transfer operation - // - deposit event for charging transaction fee + // those 7 events per signed are: + // - withdraw (Balances::Withdraw) for charging the transaction fee + // - new account (System::NewAccount) as we always transfer fund to + // non-existent account + // - endowed (Balances::Endowed) for this new account + // - successful transfer (Event::Transfer) for this transfer operation + // - 2x deposit (Balances::Deposit and Treasury::Deposit) for depositing + // the transaction fee into the treasury // - extrinsic success assert_eq!( node_runtime::System::events().len(), - (self.block.extrinsics.len() - 1) * 5 + 1, + (self.block.extrinsics.len() - 1) * 7 + 1, ); }, BlockType::Noop => { diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 6a12af4b278b7..1d394dd952db0 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -7,7 +7,7 @@ build = "build.rs" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" default-run = "substrate" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.wasm-pack.profile.release] @@ -37,7 +37,7 @@ crate-type = ["cdylib", "rlib"] codec = { package = "parity-scale-codec", version = "2.0.0" } serde = { version = "1.0.126", features = ["derive"] } futures = "0.3.16" -hex-literal = "0.3.1" +hex-literal = "0.3.3" log = "0.4.8" rand = "0.7.2" structopt = { 
version = "0.3.8", optional = true } @@ -46,6 +46,7 @@ structopt = { version = "0.3.8", optional = true } sp-authority-discovery = { version = "4.0.0-dev", path = "../../../primitives/authority-discovery" } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } grandpa-primitives = { version = "4.0.0-dev", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-timestamp = { version = "4.0.0-dev", path = "../../../primitives/timestamp" } @@ -78,6 +79,7 @@ sc-sync-state-rpc = { version = "0.10.0-dev", path = "../../../client/sync-state # frame dependencies frame-system = { version = "4.0.0-dev", path = "../../../frame/system" } +frame-system-rpc-runtime-api = { version = "4.0.0-dev", path = "../../../frame/system/rpc/runtime-api" } pallet-transaction-payment = { version = "4.0.0-dev", path = "../../../frame/transaction-payment" } pallet-im-online = { version = "4.0.0-dev", default-features = false, path = "../../../frame/im-online" } @@ -117,13 +119,18 @@ sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" } futures = "0.3.16" tempfile = "3.1.0" -assert_cmd = "1.0" +assert_cmd = "2.0.2" nix = "0.19" serde_json = "1.0" regex = "1" platforms = "1.1" -async-std = { version = "1.6.5", features = ["attributes"] } +async-std = { version = "1.10.0", features = ["attributes"] } soketto = "0.4.2" +criterion = { version = "0.3.5", features = [ "async_tokio" ] } +tokio = { version = "1.10", features = ["macros", "time"] } +jsonrpsee-ws-client = { version = "0.3.1", default-features = false, features = ["tokio1"] } +wait-timeout = "0.2" +remote-externalities = { path = 
"../../../utils/frame/remote-externalities" } [build-dependencies] structopt = { version = "0.3.8", optional = true } @@ -133,9 +140,10 @@ substrate-build-script-utils = { version = "3.0.0", optional = true, path = "../ substrate-frame-cli = { version = "4.0.0-dev", optional = true, path = "../../../utils/frame/frame-utilities-cli" } try-runtime-cli = { version = "0.10.0-dev", optional = true, path = "../../../utils/frame/try-runtime/cli" } sc-cli = { version = "0.10.0-dev", path = "../../../client/cli", optional = true } +pallet-balances = { version = "4.0.0-dev", path = "../../../frame/balances" } [features] -default = [ "cli" ] +default = ["cli"] cli = [ "node-executor/wasmi-errno", "node-inspect", @@ -154,3 +162,7 @@ runtime-benchmarks = [ # Enable features that allow the runtime to be tried and debugged. Name might be subject to change # in the near future. try-runtime = ["node-runtime/try-runtime", "try-runtime-cli"] + +[[bench]] +name = "transaction_pool" +harness = false diff --git a/bin/node/cli/benches/transaction_pool.rs b/bin/node/cli/benches/transaction_pool.rs new file mode 100644 index 0000000000000..4f5ccd6ea912f --- /dev/null +++ b/bin/node/cli/benches/transaction_pool.rs @@ -0,0 +1,274 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use criterion::{criterion_group, criterion_main, BatchSize, Criterion, Throughput}; +use futures::{future, StreamExt}; +use node_cli::service::{create_extrinsic, fetch_nonce, FullClient, TransactionPool}; +use node_primitives::AccountId; +use node_runtime::{constants::currency::*, BalancesCall, SudoCall}; +use sc_client_api::execution_extensions::ExecutionStrategies; +use sc_service::{ + config::{ + DatabaseSource, KeepBlocks, KeystoreConfig, NetworkConfiguration, OffchainWorkerConfig, + PruningMode, TransactionPoolOptions, TransactionStorageMode, WasmExecutionMethod, + }, + BasePath, Configuration, Role, +}; +use sc_transaction_pool::PoolLimit; +use sc_transaction_pool_api::{TransactionPool as _, TransactionSource, TransactionStatus}; +use sp_core::{crypto::Pair, sr25519}; +use sp_keyring::Sr25519Keyring; +use sp_runtime::{generic::BlockId, OpaqueExtrinsic}; +use tokio::runtime::Handle; + +fn new_node(tokio_handle: Handle) -> node_cli::service::NewFullBase { + let base_path = BasePath::new_temp_dir().expect("Creates base path"); + let root = base_path.path().to_path_buf(); + + let network_config = NetworkConfiguration::new( + Sr25519Keyring::Alice.to_seed(), + "network/test/0.1", + Default::default(), + None, + ); + + let spec = Box::new(node_cli::chain_spec::development_config()); + + let config = Configuration { + impl_name: "BenchmarkImpl".into(), + impl_version: "1.0".into(), + role: Role::Authority, + tokio_handle, + transaction_pool: TransactionPoolOptions { + ready: PoolLimit { count: 100_000, total_bytes: 100 * 1024 * 1024 }, + future: PoolLimit { count: 100_000, total_bytes: 100 * 1024 * 1024 }, + reject_future_transactions: false, + }, + network: network_config, + keystore: KeystoreConfig::InMemory, + keystore_remote: Default::default(), + database: DatabaseSource::RocksDb { path: root.join("db"), cache_size: 128 }, + state_cache_size: 
67108864, + state_cache_child_ratio: None, + state_pruning: PruningMode::ArchiveAll, + keep_blocks: KeepBlocks::All, + transaction_storage: TransactionStorageMode::BlockBody, + chain_spec: spec, + wasm_method: WasmExecutionMethod::Interpreted, + // NOTE: we enforce the use of the native runtime to make the errors more debuggable + execution_strategies: ExecutionStrategies { + syncing: sc_client_api::ExecutionStrategy::NativeWhenPossible, + importing: sc_client_api::ExecutionStrategy::NativeWhenPossible, + block_construction: sc_client_api::ExecutionStrategy::NativeWhenPossible, + offchain_worker: sc_client_api::ExecutionStrategy::NativeWhenPossible, + other: sc_client_api::ExecutionStrategy::NativeWhenPossible, + }, + rpc_http: None, + rpc_ws: None, + rpc_ipc: None, + rpc_ws_max_connections: None, + rpc_cors: None, + rpc_methods: Default::default(), + rpc_max_payload: None, + ws_max_out_buffer_capacity: None, + prometheus_config: None, + telemetry_endpoints: None, + default_heap_pages: None, + offchain_worker: OffchainWorkerConfig { enabled: true, indexing_enabled: false }, + force_authoring: false, + disable_grandpa: false, + dev_key_seed: Some(Sr25519Keyring::Alice.to_seed()), + tracing_targets: None, + tracing_receiver: Default::default(), + max_runtime_instances: 8, + announce_block: true, + base_path: Some(base_path), + informant_output_format: Default::default(), + wasm_runtime_overrides: None, + }; + + node_cli::service::new_full_base(config, |_, _| ()).expect("Creates node") +} + +fn create_accounts(num: usize) -> Vec { + (0..num) + .map(|i| { + Pair::from_string(&format!("{}/{}", Sr25519Keyring::Alice.to_seed(), i), None) + .expect("Creates account pair") + }) + .collect() +} + +/// Create the extrinsics that will initialize the accounts from the sudo account (Alice). +/// +/// `start_nonce` is the current nonce of Alice. 
+fn create_account_extrinsics( + client: &FullClient, + accounts: &[sr25519::Pair], +) -> Vec { + let start_nonce = fetch_nonce(client, Sr25519Keyring::Alice.pair()); + + accounts + .iter() + .enumerate() + .map(|(i, a)| { + vec![ + // Reset the nonce by removing any funds + create_extrinsic( + client, + Sr25519Keyring::Alice.pair(), + SudoCall::sudo { + call: Box::new( + BalancesCall::set_balance { + who: AccountId::from(a.public()).into(), + new_free: 0, + new_reserved: 0, + } + .into(), + ), + }, + Some(start_nonce + (i as u32) * 2), + ), + // Give back funds + create_extrinsic( + client, + Sr25519Keyring::Alice.pair(), + SudoCall::sudo { + call: Box::new( + BalancesCall::set_balance { + who: AccountId::from(a.public()).into(), + new_free: 1_000_000 * DOLLARS, + new_reserved: 0, + } + .into(), + ), + }, + Some(start_nonce + (i as u32) * 2 + 1), + ), + ] + }) + .flatten() + .map(OpaqueExtrinsic::from) + .collect() +} + +fn create_benchmark_extrinsics( + client: &FullClient, + accounts: &[sr25519::Pair], + extrinsics_per_account: usize, +) -> Vec { + accounts + .iter() + .map(|account| { + (0..extrinsics_per_account).map(move |nonce| { + create_extrinsic( + client, + account.clone(), + BalancesCall::transfer { + dest: Sr25519Keyring::Bob.to_account_id().into(), + value: 1 * DOLLARS, + }, + Some(nonce as u32), + ) + }) + }) + .flatten() + .map(OpaqueExtrinsic::from) + .collect() +} + +async fn submit_tx_and_wait_for_inclusion( + tx_pool: &TransactionPool, + tx: OpaqueExtrinsic, + client: &FullClient, + wait_for_finalized: bool, +) { + let best_hash = client.chain_info().best_hash; + + let mut watch = tx_pool + .submit_and_watch(&BlockId::Hash(best_hash), TransactionSource::External, tx.clone()) + .await + .expect("Submits tx to pool") + .fuse(); + + loop { + match watch.select_next_some().await { + TransactionStatus::Finalized(_) => break, + TransactionStatus::InBlock(_) if !wait_for_finalized => break, + _ => {}, + } + } +} + +fn transaction_pool_benchmarks(c: 
&mut Criterion) { + sp_tracing::try_init_simple(); + + let runtime = tokio::runtime::Runtime::new().expect("Creates tokio runtime"); + let tokio_handle = runtime.handle().clone(); + + let node = new_node(tokio_handle.clone()); + + let account_num = 10; + let extrinsics_per_account = 2000; + let accounts = create_accounts(account_num); + + let mut group = c.benchmark_group("Transaction pool"); + + group.sample_size(10); + group.throughput(Throughput::Elements(account_num as u64 * extrinsics_per_account as u64)); + + let mut counter = 1; + group.bench_function( + format!("{} transfers from {} accounts", account_num * extrinsics_per_account, account_num), + move |b| { + b.iter_batched( + || { + let prepare_extrinsics = create_account_extrinsics(&*node.client, &accounts); + + runtime.block_on(future::join_all(prepare_extrinsics.into_iter().map(|tx| { + submit_tx_and_wait_for_inclusion( + &node.transaction_pool, + tx, + &*node.client, + true, + ) + }))); + + create_benchmark_extrinsics(&*node.client, &accounts, extrinsics_per_account) + }, + |extrinsics| { + runtime.block_on(future::join_all(extrinsics.into_iter().map(|tx| { + submit_tx_and_wait_for_inclusion( + &node.transaction_pool, + tx, + &*node.client, + false, + ) + }))); + + println!("Finished {}", counter); + counter += 1; + }, + BatchSize::SmallInput, + ) + }, + ); +} + +criterion_group!(benches, transaction_pool_benchmarks); +criterion_main!(benches); diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 352e007a891ba..b5e36d9b53629 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -265,7 +265,7 @@ pub fn testnet_genesis( .map(|x| &x.0) .chain(initial_nominators.iter()) .for_each(|x| { - if !endowed_accounts.contains(&x) { + if !endowed_accounts.contains(x) { endowed_accounts.push(x.clone()) } }); @@ -361,6 +361,7 @@ pub fn testnet_genesis( max_members: 999, }, vesting: Default::default(), + assets: Default::default(), gilt: Default::default(), 
transaction_storage: Default::default(), } diff --git a/bin/node/cli/src/lib.rs b/bin/node/cli/src/lib.rs index 1a4c1b0eab8db..ae851c6cdf628 100644 --- a/bin/node/cli/src/lib.rs +++ b/bin/node/cli/src/lib.rs @@ -33,7 +33,7 @@ pub mod chain_spec; #[macro_use] -mod service; +pub mod service; #[cfg(feature = "cli")] mod cli; #[cfg(feature = "cli")] diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index acc7df5b1e5a3..938f359368181 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -20,20 +20,25 @@ //! Service implementation. Specialized wrapper over substrate service. +use codec::Encode; +use frame_system_rpc_runtime_api::AccountNonceApi; use futures::prelude::*; use node_executor::ExecutorDispatch; use node_primitives::Block; use node_runtime::RuntimeApi; -use sc_client_api::{ExecutorProvider, RemoteBackend}; +use sc_client_api::{BlockBackend, ExecutorProvider, RemoteBackend}; use sc_consensus_babe::{self, SlotProportion}; use sc_executor::NativeElseWasmExecutor; use sc_network::{Event, NetworkService}; use sc_service::{config::Configuration, error::Error as ServiceError, RpcHandlers, TaskManager}; use sc_telemetry::{Telemetry, TelemetryWorker}; -use sp_runtime::traits::Block as BlockT; +use sp_api::ProvideRuntimeApi; +use sp_core::crypto::Pair; +use sp_runtime::{generic, traits::Block as BlockT, SaturatedConversion}; use std::sync::Arc; -type FullClient = +/// The full client type definition. +pub type FullClient = sc_service::TFullClient>; type FullBackend = sc_service::TFullBackend; type FullSelectChain = sc_consensus::LongestChain; @@ -41,7 +46,80 @@ type FullGrandpaBlockImport = grandpa::GrandpaBlockImport; type LightClient = sc_service::TLightClient>; +/// The transaction pool type definition. +pub type TransactionPool = sc_transaction_pool::FullPool; + +/// Fetch the nonce of the given `account` from the chain state. +/// +/// Note: Should only be used for tests.
+pub fn fetch_nonce(client: &FullClient, account: sp_core::sr25519::Pair) -> u32 { + let best_hash = client.chain_info().best_hash; + client + .runtime_api() + .account_nonce(&generic::BlockId::Hash(best_hash), account.public().into()) + .expect("Fetching account nonce works; qed") +} + +/// Create a transaction using the given `call`. +/// +/// The transaction will be signed by `sender`. If `nonce` is `None` it will be fetched from the +/// state of the best block. +/// +/// Note: Should only be used for tests. +pub fn create_extrinsic( + client: &FullClient, + sender: sp_core::sr25519::Pair, + function: impl Into, + nonce: Option, +) -> node_runtime::UncheckedExtrinsic { + let function = function.into(); + let genesis_hash = client.block_hash(0).ok().flatten().expect("Genesis block exists; qed"); + let best_hash = client.chain_info().best_hash; + let best_block = client.chain_info().best_number; + let nonce = nonce.unwrap_or_else(|| fetch_nonce(client, sender.clone())); + + let period = node_runtime::BlockHashCount::get() + .checked_next_power_of_two() + .map(|c| c / 2) + .unwrap_or(2) as u64; + let tip = 0; + let extra: node_runtime::SignedExtra = ( + frame_system::CheckSpecVersion::::new(), + frame_system::CheckTxVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal( + period, + best_block.saturated_into(), + )), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + ); + let raw_payload = node_runtime::SignedPayload::from_raw( + function.clone(), + extra.clone(), + ( + node_runtime::VERSION.spec_version, + node_runtime::VERSION.transaction_version, + genesis_hash, + best_hash, + (), + (), + (), + ), + ); + let signature = raw_payload.using_encoded(|e| sender.sign(e)); + + node_runtime::UncheckedExtrinsic::new_signed( + function.clone(), + sp_runtime::AccountId32::from(sender.public()).into(), + 
node_runtime::Signature::Sr25519(signature.clone()), + extra.clone(), + ) +} + +/// Creates a new partial node. pub fn new_partial( config: &Configuration, ) -> Result< @@ -86,7 +164,7 @@ pub fn new_partial( let (client, backend, keystore_container, task_manager) = sc_service::new_full_parts::( - &config, + config, telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()), executor, )?; @@ -211,11 +289,16 @@ pub fn new_partial( }) } +/// Result of [`new_full_base`]. pub struct NewFullBase { + /// The task manager of the node. pub task_manager: TaskManager, + /// The client instance of the node. pub client: Arc, + /// The networking service of the node. pub network: Arc::Hash>>, - pub transaction_pool: Arc>, + /// The transaction pool of the node. + pub transaction_pool: Arc, } /// Creates a full service from the configuration. @@ -244,6 +327,7 @@ pub fn new_full_base( let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new( backend.clone(), import_setup.1.shared_authority_set().clone(), + Vec::default(), )); let (network, system_rpc_tx, network_starter) = @@ -277,7 +361,7 @@ pub fn new_full_base( let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams { config, - backend: backend.clone(), + backend, client: client.clone(), keystore: keystore_container.sync_keystore(), network: network.clone(), @@ -432,6 +516,7 @@ pub fn new_full(config: Configuration) -> Result { new_full_base(config, |_, _| ()).map(|NewFullBase { task_manager, .. }| task_manager) } +/// Creates a light service from the configuration. 
pub fn new_light_base( mut config: Configuration, ) -> Result< @@ -507,7 +592,7 @@ pub fn new_light_base( babe_block_import, Some(Box::new(justification_import)), client.clone(), - select_chain.clone(), + select_chain, move |_, ()| async move { let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); @@ -531,6 +616,7 @@ pub fn new_light_base( let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new( backend.clone(), grandpa_link.shared_authority_set().clone(), + Vec::default(), )); let (network, system_rpc_tx, network_starter) = diff --git a/bin/node/cli/tests/check_block_works.rs b/bin/node/cli/tests/check_block_works.rs index 707fd217e33e8..216bcc6d9fc13 100644 --- a/bin/node/cli/tests/check_block_works.rs +++ b/bin/node/cli/tests/check_block_works.rs @@ -24,11 +24,11 @@ use tempfile::tempdir; pub mod common; -#[test] -fn check_block_works() { +#[tokio::test] +async fn check_block_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_node_for_a_while(base_path.path(), &["--dev"]); + common::run_node_for_a_while(base_path.path(), &["--dev"]).await; let status = Command::new(cargo_bin("substrate")) .args(&["check-block", "--dev", "--pruning", "archive", "-d"]) diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 54b9c749bf1de..85effc858e155 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -23,58 +23,115 @@ use nix::{ sys::signal::{kill, Signal::SIGINT}, unistd::Pid, }; +use node_primitives::Block; +use remote_externalities::rpc_api; use std::{ convert::TryInto, + ops::{Deref, DerefMut}, path::Path, process::{Child, Command, ExitStatus}, - thread, time::Duration, }; +use tokio::time::timeout; + +static LOCALHOST_WS: &str = "ws://127.0.0.1:9944/"; /// Wait for the given `child` the given number of `secs`. /// /// Returns the `Some(exit status)` or `None` if the process did not finish in the given time. 
-pub fn wait_for(child: &mut Child, secs: usize) -> Option { - for i in 0..secs { - match child.try_wait().unwrap() { - Some(status) => { - if i > 5 { - eprintln!("Child process took {} seconds to exit gracefully", i); - } - return Some(status) - }, - None => thread::sleep(Duration::from_secs(1)), +pub fn wait_for(child: &mut Child, secs: u64) -> Result { + let result = wait_timeout::ChildExt::wait_timeout(child, Duration::from_secs(5.min(secs))) + .map_err(|_| ())?; + if let Some(exit_status) = result { + Ok(exit_status) + } else { + if secs > 5 { + eprintln!("Child process taking over 5 seconds to exit gracefully"); + let result = wait_timeout::ChildExt::wait_timeout(child, Duration::from_secs(secs - 5)) + .map_err(|_| ())?; + if let Some(exit_status) = result { + return Ok(exit_status) + } } + eprintln!("Took too long to exit (> {} seconds). Killing...", secs); + let _ = child.kill(); + child.wait().unwrap(); + Err(()) } - eprintln!("Took too long to exit (> {} seconds). Killing...", secs); - let _ = child.kill(); - child.wait().unwrap(); +} + +/// Wait for at least n blocks to be finalized within a specified time. 
+pub async fn wait_n_finalized_blocks( + n: usize, + timeout_secs: u64, +) -> Result<(), tokio::time::error::Elapsed> { + timeout(Duration::from_secs(timeout_secs), wait_n_finalized_blocks_from(n, LOCALHOST_WS)).await +} + +/// Wait for at least n blocks to be finalized from a specified node +pub async fn wait_n_finalized_blocks_from(n: usize, url: &str) { + let mut built_blocks = std::collections::HashSet::new(); + let mut interval = tokio::time::interval(Duration::from_secs(2)); - None + loop { + if let Ok(block) = rpc_api::get_finalized_head::(url.to_string()).await { + built_blocks.insert(block); + if built_blocks.len() > n { + break + } + }; + interval.tick().await; + } } -/// Run the node for a while (30 seconds) -pub fn run_node_for_a_while(base_path: &Path, args: &[&str]) { +/// Run the node for a while (3 blocks) +pub async fn run_node_for_a_while(base_path: &Path, args: &[&str]) { let mut cmd = Command::new(cargo_bin("substrate")); - let mut cmd = cmd.args(args).arg("-d").arg(base_path).spawn().unwrap(); + let mut child = KillChildOnDrop(cmd.args(args).arg("-d").arg(base_path).spawn().unwrap()); // Let it produce some blocks. 
- thread::sleep(Duration::from_secs(30)); - assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); + let _ = wait_n_finalized_blocks(3, 30).await; + + assert!(child.try_wait().unwrap().is_none(), "the process should still be running"); // Stop the process - kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); - assert!(wait_for(&mut cmd, 40).map(|x| x.success()).unwrap_or_default()); + kill(Pid::from_raw(child.id().try_into().unwrap()), SIGINT).unwrap(); + assert!(wait_for(&mut child, 40).map(|x| x.success()).unwrap()); } /// Run the node asserting that it fails with an error pub fn run_node_assert_fail(base_path: &Path, args: &[&str]) { let mut cmd = Command::new(cargo_bin("substrate")); - let mut cmd = cmd.args(args).arg("-d").arg(base_path).spawn().unwrap(); + let mut child = KillChildOnDrop(cmd.args(args).arg("-d").arg(base_path).spawn().unwrap()); - // Let it produce some blocks. - thread::sleep(Duration::from_secs(10)); - assert!(cmd.try_wait().unwrap().is_some(), "the process should not be running anymore"); + // Let it produce some blocks, but it should die within 10 seconds. 
+ assert_ne!( + wait_timeout::ChildExt::wait_timeout(&mut *child, Duration::from_secs(10)).unwrap(), + None, + "the process should not be running anymore" + ); +} + +pub struct KillChildOnDrop(pub Child); + +impl Drop for KillChildOnDrop { + fn drop(&mut self) { + let _ = self.0.kill(); + } +} + +impl Deref for KillChildOnDrop { + type Target = Child; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for KillChildOnDrop { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } diff --git a/bin/node/cli/tests/database_role_subdir_migration.rs b/bin/node/cli/tests/database_role_subdir_migration.rs index 516908111ae72..9338d8a8e4f43 100644 --- a/bin/node/cli/tests/database_role_subdir_migration.rs +++ b/bin/node/cli/tests/database_role_subdir_migration.rs @@ -25,9 +25,9 @@ use tempfile::tempdir; pub mod common; -#[test] +#[tokio::test] #[cfg(unix)] -fn database_role_subdir_migration() { +async fn database_role_subdir_migration() { type Block = RawBlock>; let base_path = tempdir().expect("could not create a temp dir"); @@ -62,7 +62,8 @@ fn database_role_subdir_migration() { "44445", "--no-prometheus", ], - ); + ) + .await; // check if the database dir had been migrated assert!(!path.join("db_version").exists()); diff --git a/bin/node/cli/tests/export_import_flow.rs b/bin/node/cli/tests/export_import_flow.rs index 7cbaa152699b4..937f03b8e5dae 100644 --- a/bin/node/cli/tests/export_import_flow.rs +++ b/bin/node/cli/tests/export_import_flow.rs @@ -182,13 +182,13 @@ impl<'a> ExportImportRevertExecutor<'a> { } } -#[test] -fn export_import_revert() { +#[tokio::test] +async fn export_import_revert() { let base_path = tempdir().expect("could not create a temp dir"); let exported_blocks_file = base_path.path().join("exported_blocks"); let db_path = base_path.path().join("db"); - common::run_node_for_a_while(base_path.path(), &["--dev"]); + common::run_node_for_a_while(base_path.path(), &["--dev"]).await; let mut executor = 
ExportImportRevertExecutor::new(&base_path, &exported_blocks_file, &db_path); diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs index 2a89801547a4b..6f980d2acbfcb 100644 --- a/bin/node/cli/tests/inspect_works.rs +++ b/bin/node/cli/tests/inspect_works.rs @@ -24,11 +24,11 @@ use tempfile::tempdir; pub mod common; -#[test] -fn inspect_works() { +#[tokio::test] +async fn inspect_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_node_for_a_while(base_path.path(), &["--dev"]); + common::run_node_for_a_while(base_path.path(), &["--dev"]).await; let status = Command::new(cargo_bin("substrate")) .args(&["inspect", "--dev", "--pruning", "archive", "-d"]) diff --git a/bin/node/cli/tests/purge_chain_works.rs b/bin/node/cli/tests/purge_chain_works.rs index 0f16a51e5d0a4..8a8601c863d95 100644 --- a/bin/node/cli/tests/purge_chain_works.rs +++ b/bin/node/cli/tests/purge_chain_works.rs @@ -22,12 +22,12 @@ use tempfile::tempdir; pub mod common; -#[test] +#[tokio::test] #[cfg(unix)] -fn purge_chain_works() { +async fn purge_chain_works() { let base_path = tempdir().expect("could not create a temp dir"); - common::run_node_for_a_while(base_path.path(), &["--dev"]); + common::run_node_for_a_while(base_path.path(), &["--dev"]).await; let status = Command::new(cargo_bin("substrate")) .args(&["purge-chain", "--dev", "-d"]) diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs index 03a1826f2f080..fc5094c2d722f 100644 --- a/bin/node/cli/tests/running_the_node_and_interrupt.rs +++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs @@ -17,7 +17,6 @@ // along with this program. If not, see . 
#![cfg(unix)] - use assert_cmd::cargo::cargo_bin; use nix::{ sys::signal::{ @@ -26,67 +25,43 @@ use nix::{ }, unistd::Pid, }; -use sc_service::Deref; use std::{ convert::TryInto, - ops::DerefMut, process::{Child, Command}, - thread, - time::Duration, }; use tempfile::tempdir; pub mod common; -#[test] -fn running_the_node_works_and_can_be_interrupted() { - fn run_command_and_kill(signal: Signal) { +#[tokio::test] +async fn running_the_node_works_and_can_be_interrupted() { + async fn run_command_and_kill(signal: Signal) { let base_path = tempdir().expect("could not create a temp dir"); - let mut cmd = Command::new(cargo_bin("substrate")) - .args(&["--dev", "-d"]) - .arg(base_path.path()) - .spawn() - .unwrap(); + let mut cmd = common::KillChildOnDrop( + Command::new(cargo_bin("substrate")) + .args(&["--dev", "-d"]) + .arg(base_path.path()) + .spawn() + .unwrap(), + ); - thread::sleep(Duration::from_secs(20)); + common::wait_n_finalized_blocks(3, 30).await.unwrap(); assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap(); assert_eq!( common::wait_for(&mut cmd, 30).map(|x| x.success()), - Some(true), + Ok(true), "the process must exit gracefully after signal {}", signal, ); } - run_command_and_kill(SIGINT); - run_command_and_kill(SIGTERM); -} - -struct KillChildOnDrop(Child); - -impl Drop for KillChildOnDrop { - fn drop(&mut self) { - let _ = self.0.kill(); - } -} - -impl Deref for KillChildOnDrop { - type Target = Child; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -impl DerefMut for KillChildOnDrop { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.0 - } + run_command_and_kill(SIGINT).await; + run_command_and_kill(SIGTERM).await; } -#[test] -fn running_two_nodes_with_the_same_ws_port_should_work() { +#[tokio::test] +async fn running_two_nodes_with_the_same_ws_port_should_work() { fn start_node() -> Child { Command::new(cargo_bin("substrate")) 
.args(&["--dev", "--tmp", "--ws-port=45789"]) @@ -94,10 +69,10 @@ fn running_two_nodes_with_the_same_ws_port_should_work() { .unwrap() } - let mut first_node = KillChildOnDrop(start_node()); - let mut second_node = KillChildOnDrop(start_node()); + let mut first_node = common::KillChildOnDrop(start_node()); + let mut second_node = common::KillChildOnDrop(start_node()); - thread::sleep(Duration::from_secs(30)); + let _ = common::wait_n_finalized_blocks(3, 30).await; assert!(first_node.try_wait().unwrap().is_none(), "The first node should still be running"); assert!(second_node.try_wait().unwrap().is_none(), "The second node should still be running"); @@ -107,12 +82,12 @@ fn running_two_nodes_with_the_same_ws_port_should_work() { assert_eq!( common::wait_for(&mut first_node, 30).map(|x| x.success()), - Some(true), + Ok(true), "The first node must exit gracefully", ); assert_eq!( common::wait_for(&mut second_node, 30).map(|x| x.success()), - Some(true), + Ok(true), "The second node must exit gracefully", ); } diff --git a/bin/node/cli/tests/temp_base_path_works.rs b/bin/node/cli/tests/temp_base_path_works.rs index c107740b9b0a5..5d8e6c9ec4539 100644 --- a/bin/node/cli/tests/temp_base_path_works.rs +++ b/bin/node/cli/tests/temp_base_path_works.rs @@ -29,37 +29,34 @@ use std::{ io::Read, path::PathBuf, process::{Command, Stdio}, - thread, - time::Duration, }; pub mod common; -#[test] -fn temp_base_path_works() { +#[tokio::test] +async fn temp_base_path_works() { let mut cmd = Command::new(cargo_bin("substrate")); - - let mut cmd = cmd - .args(&["--dev", "--tmp"]) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn() - .unwrap(); + let mut child = common::KillChildOnDrop( + cmd.args(&["--dev", "--tmp"]) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .unwrap(), + ); // Let it produce some blocks. 
- thread::sleep(Duration::from_secs(30)); - assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); + common::wait_n_finalized_blocks(3, 30).await.unwrap(); + assert!(child.try_wait().unwrap().is_none(), "the process should still be running"); // Stop the process - kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); - assert!(common::wait_for(&mut cmd, 40).map(|x| x.success()).unwrap_or_default()); + kill(Pid::from_raw(child.id().try_into().unwrap()), SIGINT).unwrap(); + assert!(common::wait_for(&mut child, 40).map(|x| x.success()).unwrap_or_default()); // Ensure the database has been deleted let mut stderr = String::new(); - cmd.stderr.unwrap().read_to_string(&mut stderr).unwrap(); + child.stderr.as_mut().unwrap().read_to_string(&mut stderr).unwrap(); let re = Regex::new(r"Database: .+ at (\S+)").unwrap(); - let db_path = - PathBuf::from(re.captures(stderr.as_str()).unwrap().get(1).unwrap().as_str().to_string()); + let db_path = PathBuf::from(re.captures(stderr.as_str()).unwrap().get(1).unwrap().as_str()); assert!(!db_path.exists()); } diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index f283a913915f3..462bb034610e9 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Substrate node implementation in Rust." 
edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index c1ab5e5a0fe13..bbb9339189b06 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -385,6 +385,11 @@ fn full_native_block_import_works() { })), topics: vec![], }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::Balances(pallet_balances::Event::Withdraw(alice().into(), fees)), + topics: vec![], + }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: Event::Balances(pallet_balances::Event::Transfer( @@ -394,6 +399,14 @@ fn full_native_block_import_works() { )), topics: vec![], }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::Balances(pallet_balances::Event::Deposit( + pallet_treasury::Pallet::::account_id(), + fees * 8 / 10, + )), + topics: vec![], + }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: Event::Treasury(pallet_treasury::Event::Deposit(fees * 8 / 10)), @@ -439,6 +452,11 @@ fn full_native_block_import_works() { })), topics: vec![], }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::Balances(pallet_balances::Event::Withdraw(bob().into(), fees)), + topics: vec![], + }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: Event::Balances(pallet_balances::Event::Transfer( @@ -448,6 +466,14 @@ fn full_native_block_import_works() { )), topics: vec![], }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::Balances(pallet_balances::Event::Deposit( + pallet_treasury::Pallet::::account_id(), + fees * 8 / 10, + )), + topics: vec![], + }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: Event::Treasury(pallet_treasury::Event::Deposit(fees * 8 / 10)), @@ -461,6 +487,11 @@ fn full_native_block_import_works() { })), topics: vec![], }, + EventRecord { + phase: 
Phase::ApplyExtrinsic(2), + event: Event::Balances(pallet_balances::Event::Withdraw(alice().into(), fees)), + topics: vec![], + }, EventRecord { phase: Phase::ApplyExtrinsic(2), event: Event::Balances(pallet_balances::Event::Transfer( @@ -470,6 +501,14 @@ fn full_native_block_import_works() { )), topics: vec![], }, + EventRecord { + phase: Phase::ApplyExtrinsic(2), + event: Event::Balances(pallet_balances::Event::Deposit( + pallet_treasury::Pallet::::account_id(), + fees * 8 / 10, + )), + topics: vec![], + }, EventRecord { phase: Phase::ApplyExtrinsic(2), event: Event::Treasury(pallet_treasury::Event::Deposit(fees * 8 / 10)), diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index 1570e5dbf8e44..6f526b896ba76 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -4,7 +4,7 @@ version = "0.9.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index 12ec57e4d55b6..07c3aca6059f6 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -4,7 +4,7 @@ version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml index a5255769158a4..59695edb6fa26 100644 --- a/bin/node/rpc-client/Cargo.toml +++ b/bin/node/rpc-client/Cargo.toml @@ -4,7 +4,7 @@ version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = 
"https://github.com/paritytech/substrate/" [package.metadata.docs.rs] diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 0cb606f79f086..4c7b15459cea9 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -4,7 +4,7 @@ version = "3.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index d434be8f3c609..22ff0954e2458 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] @@ -20,7 +20,7 @@ codec = { package = "parity-scale-codec", version = "2.2.0", default-features = ] } scale-info = { version = "1.0", default-features = false, features = ["derive"] } static_assertions = "1.1.0" -hex-literal = { version = "0.3.1", optional = true } +hex-literal = { version = "0.3.3", optional = true } log = { version = "0.4.14", default-features = false } # primitives diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 4cf313f8d26f0..c7920629bf356 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -81,6 +81,8 @@ pub use pallet_balances::Call as BalancesCall; #[cfg(any(feature = "std", test))] pub use pallet_staking::StakerStatus; #[cfg(any(feature = "std", test))] +pub use pallet_sudo::Call as SudoCall; +#[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; /// Implementations of some helper traits passed into runtime modules as associated types. @@ -119,8 +121,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. 
If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 267, - impl_version: 1, + spec_version: 268, + impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, }; @@ -376,6 +378,7 @@ impl pallet_babe::Config for Runtime { pallet_babe::EquivocationHandler; type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; } parameter_types! { @@ -412,6 +415,7 @@ impl pallet_balances::Config for Runtime { parameter_types! { pub const TransactionByteFee: Balance = 10 * MILLICENTS; + pub const OperationalFeeMultiplier: u8 = 5; pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); pub AdjustmentVariable: Multiplier = Multiplier::saturating_from_rational(1, 100_000); pub MinimumMultiplier: Multiplier = Multiplier::saturating_from_rational(1, 1_000_000_000u128); @@ -420,6 +424,7 @@ parameter_types! { impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = TargetedFeeAdjustment; @@ -456,10 +461,6 @@ impl_opaque_keys! { } } -parameter_types! { - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); -} - impl pallet_session::Config for Runtime { type Event = Event; type ValidatorId = ::AccountId; @@ -469,7 +470,6 @@ impl pallet_session::Config for Runtime { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type WeightInfo = pallet_session::weights::SubstrateWeight; } @@ -495,6 +495,7 @@ parameter_types! { pub const SlashDeferDuration: pallet_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. 
pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; pub const MaxNominatorRewardedPerValidator: u32 = 256; + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); pub OffchainRepeat: BlockNumber = 5; } @@ -526,6 +527,7 @@ impl pallet_staking::Config for Runtime { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = ElectionProviderMultiPhase; type GenesisElectionProvider = onchain::OnChainSequentialPhragmen; // Alternatively, use pallet_staking::UseNominatorsMap to just use the nominators map. @@ -576,17 +578,18 @@ sp_npos_elections::generate_solution_type!( pub const MAX_NOMINATIONS: u32 = ::LIMIT as u32; -/// The numbers configured here should always be more than the the maximum limits of staking pallet -/// to ensure election snapshot will not run out of memory. +/// The numbers configured here could always be more than the maximum limits of staking pallet +/// to ensure election snapshot will not run out of memory. For now, we set them to smaller values +/// since the staking is bounded and the weight pipeline takes hours for this single pallet.
pub struct BenchmarkConfig; impl pallet_election_provider_multi_phase::BenchmarkingConfig for BenchmarkConfig { - const VOTERS: [u32; 2] = [5_000, 10_000]; - const TARGETS: [u32; 2] = [1_000, 2_000]; - const ACTIVE_VOTERS: [u32; 2] = [1000, 4_000]; - const DESIRED_TARGETS: [u32; 2] = [400, 800]; - const SNAPSHOT_MAXIMUM_VOTERS: u32 = 25_000; - const MINER_MAXIMUM_VOTERS: u32 = 15_000; - const MAXIMUM_TARGETS: u32 = 2000; + const VOTERS: [u32; 2] = [1000, 2000]; + const TARGETS: [u32; 2] = [500, 1000]; + const ACTIVE_VOTERS: [u32; 2] = [500, 800]; + const DESIRED_TARGETS: [u32; 2] = [200, 400]; + const SNAPSHOT_MAXIMUM_VOTERS: u32 = 1000; + const MINER_MAXIMUM_VOTERS: u32 = 1000; + const MAXIMUM_TARGETS: u32 = 300; } /// Maximum number of iterations for balancing that will be executed in the embedded OCW @@ -1035,6 +1038,7 @@ impl pallet_grandpa::Config for Runtime { >; type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; } parameter_types! { @@ -1277,7 +1281,7 @@ construct_runtime!( Multisig: pallet_multisig::{Pallet, Call, Storage, Event}, Bounties: pallet_bounties::{Pallet, Call, Storage, Event}, Tips: pallet_tips::{Pallet, Call, Storage, Event}, - Assets: pallet_assets::{Pallet, Call, Storage, Event}, + Assets: pallet_assets::{Pallet, Call, Storage, Event, Config}, Mmr: pallet_mmr::{Pallet, Storage}, Lottery: pallet_lottery::{Pallet, Call, Storage, Event}, Gilt: pallet_gilt::{Pallet, Call, Storage, Event, Config}, @@ -1439,7 +1443,7 @@ impl_runtime_apis! { slot_duration: Babe::slot_duration(), epoch_length: EpochDuration::get(), c: BABE_GENESIS_EPOCH_CONFIG.c, - genesis_authorities: Babe::authorities(), + genesis_authorities: Babe::authorities().to_vec(), randomness: Babe::randomness(), allowed_slots: BABE_GENESIS_EPOCH_CONFIG.allowed_slots, } @@ -1723,7 +1727,6 @@ impl_runtime_apis! 
{ add_benchmark!(params, batches, pallet_utility, Utility); add_benchmark!(params, batches, pallet_vesting, Vesting); - if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } Ok(batches) } } diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index d05d815121f88..e5d13189ac2fe 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Test utilities for Substrate node." edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" publish = true diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 50c1e6f9d20be..845227c5acee9 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -98,6 +98,7 @@ pub fn config_endowed( treasury: Default::default(), society: SocietyConfig { members: vec![alice(), bob()], pot: 0, max_members: 999 }, vesting: Default::default(), + assets: Default::default(), gilt: Default::default(), transaction_storage: Default::default(), } diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index 5bdf01badc3f4..bfbf2da57a6a0 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" publish = false diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 9bd38a21a664b..5e7615f60d628 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -4,7 +4,7 @@ version = "2.0.1" authors = ["Parity Technologies "] edition = 
"2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" publish = false diff --git a/bin/utils/subkey/README.md b/bin/utils/subkey/README.md index fbb486247a770..2310c59f4a283 100644 --- a/bin/utils/subkey/README.md +++ b/bin/utils/subkey/README.md @@ -18,15 +18,37 @@ If you save any output of `subkey` into a file, make sure to apply proper permis The following guide explains *some* of the `subkey` commands. For the full list and the most up to date documentation, make sure to check the integrated help with `subkey --help`. +### Install with Cargo + +You will need to have the Substrate build dependencies to install Subkey. Use the following two commands to install the dependencies and Subkey, respectively: + +Command: + +```bash +# Use the `--fast` flag to get the dependencies without needing to install the Substrate and Subkey binary +curl https://getsubstrate.io -sSf | bash -s -- --fast +# Install only `subkey`, at a specific version of the subkey crate +cargo install --force subkey --git https://github.com/paritytech/substrate --version --locked +``` + +### Run in a container + +```bash +# Use `--pull=always` with the `latest` tag, or specify a version in a tag +docker run -it --pull=always docker.io/parity/subkey:latest +``` + ### Generate a random account Generating a new key is as simple as running: - subkey generate +```bash +subkey generate +``` The output looks similar to: -``` +```text Secret phrase `hotel forest jar hover kite book view eight stuff angle legend defense` is account: Secret seed: 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d Public key (hex): 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 @@ -50,6 +72,7 @@ The **SS58 address** (or **Public Address**) of a new account is a reprensentati You can read more about the SS58 format in the [substrate 
wiki](https://github.com/paritytech/substrate/wiki/External-Address-Format-(SS58)) and see the list of reserved prefixes in the [Polkadot wiki](https://wiki.polkadot.network/docs/build-ss58-registry). For instance, considering the previous seed `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d` the SS58 addresses are: + - Polkadot: `16m4J167Mptt8UXL8aGSAi7U2FnPpPxZHPrCgMG9KJzVoFqM` - Kusama: `JLNozAv8QeLSbLFwe2UvWeKKE4yvmDbfGxTuiYkF2BUMx4M` @@ -58,12 +81,14 @@ For instance, considering the previous seed `0xa05c75731970cc7868a2fb7cb577353cd `subkey` can calso generate the output as *json*. This is useful for automation. command: -``` + +```bash subkey generate --output-type json ``` output: -``` + +```json { "accountId": "0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515", "publicKey": "0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515", @@ -76,12 +101,14 @@ output: So if you only want to get the `secretSeed` for instance, you can use: command: -``` + +```bash subkey generate --output-type json | jq -r .secretSeed ``` output: -``` + +```text 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d ``` @@ -89,10 +116,13 @@ output: `subkey` supports an additional user-defined secret that will be appended to the seed. 
Let's see the following example: - subkey generate --password extra_secret +```bash +subkey generate --password extra_secret +``` output: -``` + +```text Secret phrase `soup lyrics media market way crouch elevator put moon useful question wide` is account: Secret seed: 0xe7cfd179d6537a676cb94bac3b5c5c9cb1550e846ac4541040d077dfbac2e7fd Public key (hex): 0xf6a233c3e1de1a2ae0486100b460b3ce3d7231ddfe9dadabbd35ab968c70905d @@ -102,11 +132,15 @@ Secret phrase `soup lyrics media market way crouch elevator put moon useful ques Using the `inspect` command (see more details below), we see that knowning only the **secret seed** is no longer sufficient to recover the account: - subkey inspect "soup lyrics media market way crouch elevator put moon useful question wide" +```bash +subkey inspect "soup lyrics media market way crouch elevator put moon useful question wide" +``` which recovers the account `5Fe4sqj2K4fRuzEGvToi4KATqZfiDU7TqynjXG6PZE2dxwyh` and not `5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC` as we expected. The additional user-defined **password** (`extra_secret` in our example) is now required to fully recover the account. Let's inspect the the previous mnemonic, this time passing also the required `password` as shown below: - subkey inspect --password extra_secret "soup lyrics media market way crouch elevator put moon useful question wide" +```bash +subkey inspect --password extra_secret "soup lyrics media market way crouch elevator put moon useful question wide" +``` This time, we properly recovered `5He5pZpc7AJ8evPuab37vJF6KkFDqq9uDq2WXh877Qw6iaVC`. 
@@ -116,23 +150,29 @@ If you have *some data* about a key, `subkey inpsect` will help you discover mor If you have **secrets** that you would like to verify for instance, you can use: - subkey inspect < mnemonic | seed > +```bash +subkey inspect < mnemonic | seed > +``` If you have only **public data**, you can see a subset of the information: - subkey inspect --public < pubkey | address > +```bash +subkey inspect --public < pubkey | address > +``` **NOTE**: While you will be able to recover the secret seed from the mnemonic, the opposite is not possible. **NOTE**: For obvious reasons, the **secrets** cannot be recovered from passing **public data** such as `pubkey` or `address` as input. command: -``` + +```bash subkey inspect 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d ``` output: -``` + +```text Secret Key URI `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d` is account: Secret seed: 0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d Public key (hex): 0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 @@ -144,17 +184,23 @@ Secret Key URI `0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c9 `subkey` allows using a **secret key** to sign a random message. 
The signature can then be verified by anyone using your **public key**: - echo -n | subkey sign --suri +```bash +echo -n | subkey sign --suri +``` example: - MESSAGE=hello - SURI=0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d - echo -n $MESSAGE | subkey sign --suri $SURI +```text +MESSAGE=hello +SURI=0xa05c75731970cc7868a2fb7cb577353cd5b31f62dccced92c441acd8fee0c92d +echo -n $MESSAGE | subkey sign --suri $SURI +``` output: - 9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c +```text +9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c +``` **NOTE**: Each run of the `sign` command will yield a different output. While each signature is different, they are all valid. @@ -162,34 +208,44 @@ output: Given a message, a signature and an address, `subkey` can verify whether the **message** has been digitally signed by the holder (or one of the holders) of the **private key** for the given **address**: - echo -n | subkey verify
+```bash +echo -n | subkey verify
+``` example: - MESSAGE=hello - URI=0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 - SIGNATURE=9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c - echo -n $MESSAGE | subkey verify $SIGNATURE $URI +```bash +MESSAGE=hello +URI=0xfec70cfbf1977c6965b5af10a4534a6a35d548eb14580594d0bc543286892515 +SIGNATURE=9201af3788ad4f986b800853c79da47155f2e08fde2070d866be4c27ab060466fea0623dc2b51f4392f4c61f25381a62848dd66c5d8217fae3858e469ebd668c +echo -n $MESSAGE | subkey verify $SIGNATURE $URI +``` output: - Signature verifies correctly. +```text +Signature verifies correctly. +``` A failure looks like: - Error: SignatureInvalid +```text +Error: SignatureInvalid +``` ### Using the vanity generator You can use the included vanity generator to find a seed that provides an address which includes the desired pattern. Be warned, depending on your hardware this may take a while. command: -``` + +```bash subkey vanity --network polkadot --pattern bob ``` output: -``` + +```text Generating key containing pattern 'bob' best: 190 == top: 189 Secret Key URI `0x8c9a73097f235b84021a446bc2826a00c690ea0be3e0d81a84931cb4146d6691` is account: diff --git a/client/allocator/Cargo.toml b/client/allocator/Cargo.toml index 5ebab6cf9d61a..9383b88f756cb 100644 --- a/client/allocator/Cargo.toml +++ b/client/allocator/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Collection of allocator implementations." 
documentation = "https://docs.rs/sc-allocator" diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index 772f22e822eb2..af8704058b660 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate client interfaces." documentation = "https://docs.rs/sc-client-api" diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index e8fce19f8124e..2f4327dfc4e4a 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -369,6 +369,7 @@ impl HeaderBackend for Blockchain { None }, number_leaves: storage.leaves.count(), + block_gap: None, } } diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 8d5ed20730f0c..cee35a43df2f6 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate authority discovery." 
readme = "README.md" diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index f10d2751ccd35..3c1610256f5bc 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -73,6 +73,7 @@ impl HeaderBackend for TestApi { genesis_hash: Default::default(), number_leaves: Default::default(), finalized_state: None, + block_gap: None, } } diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 469df55cf0233..1ecdb08eba489 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Basic implementation of block-authoring logic." readme = "README.md" diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 144a3ab6850ff..0055254b67091 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -286,6 +286,11 @@ where } } +/// If the block is full we will attempt to push at most +/// this number of transactions before quitting for real. +/// It allows us to increase block utilization. +const MAX_SKIPPED_TRANSACTIONS: usize = 8; + impl Proposer where A: TransactionPool, @@ -309,11 +314,6 @@ where block_size_limit: Option, ) -> Result, PR::Proof>, sp_blockchain::Error> { - /// If the block is full we will attempt to push at most - /// this number of transactions before quitting for real. - /// It allows us to increase block utilization. 
- const MAX_SKIPPED_TRANSACTIONS: usize = 8; - let mut block_builder = self.client.new_block_at(&self.parent_id, inherent_digests, PR::ENABLED)?; @@ -336,6 +336,9 @@ where } // proceed with transactions + // We calculate soft deadline used only in case we start skipping transactions. + let now = (self.now)(); + let soft_deadline = now + deadline.saturating_duration_since(now) / 2; let block_timer = time::Instant::now(); let mut skipped = 0; let mut unqueue_invalid = Vec::new(); @@ -344,7 +347,7 @@ where let mut t2 = futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8).fuse(); - let pending_iterator = select! { + let mut pending_iterator = select! { res = t1 => res, _ = t2 => { log::warn!( @@ -363,8 +366,9 @@ where let mut transaction_pushed = false; let mut hit_block_size_limit = false; - for pending_tx in pending_iterator { - if (self.now)() > deadline { + while let Some(pending_tx) = pending_iterator.next() { + let now = (self.now)(); + if now > deadline { debug!( "Consensus deadline reached when pushing block transactions, \ proceeding with proposing." @@ -378,6 +382,7 @@ where let block_size = block_builder.estimate_block_size(self.include_proof_in_block_size_estimation); if block_size + pending_tx_data.encoded_size() > block_size_limit { + pending_iterator.report_invalid(&pending_tx); if skipped < MAX_SKIPPED_TRANSACTIONS { skipped += 1; debug!( @@ -386,6 +391,13 @@ where MAX_SKIPPED_TRANSACTIONS - skipped, ); continue + } else if now < soft_deadline { + debug!( + "Transaction would overflow the block size limit, \ + but we still have time before the soft deadline, so \ + we will try a bit more." 
+ ); + continue } else { debug!("Reached block size limit, proceeding with proposing."); hit_block_size_limit = true; @@ -400,18 +412,25 @@ where debug!("[{:?}] Pushed to the block.", pending_tx_hash); }, Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { + pending_iterator.report_invalid(&pending_tx); if skipped < MAX_SKIPPED_TRANSACTIONS { skipped += 1; debug!( "Block seems full, but will try {} more transactions before quitting.", MAX_SKIPPED_TRANSACTIONS - skipped, ); + } else if (self.now)() < soft_deadline { + debug!( + "Block seems full, but we still have time before the soft deadline, \ + so we will try a bit more before quitting." + ); } else { debug!("Block is full, proceed with proposing."); break } }, Err(e) if skipped > 0 => { + pending_iterator.report_invalid(&pending_tx); trace!( "[{:?}] Ignoring invalid transaction when skipping: {}", pending_tx_hash, @@ -419,6 +438,7 @@ where ); }, Err(e) => { + pending_iterator.report_invalid(&pending_tx); debug!("[{:?}] Invalid transaction: {}", pending_tx_hash, e); unqueue_invalid.push(pending_tx_hash); }, @@ -489,6 +509,7 @@ mod tests { use sp_api::Core; use sp_blockchain::HeaderBackend; use sp_consensus::{BlockOrigin, Environment, Proposer}; + use sp_core::Pair; use sp_runtime::traits::NumberFor; use substrate_test_runtime_client::{ prelude::*, @@ -508,6 +529,19 @@ mod tests { .into_signed_tx() } + fn exhausts_resources_extrinsic_from(who: usize) -> Extrinsic { + let pair = AccountKeyring::numeric(who); + let transfer = Transfer { + // increase the amount to bump priority + amount: 1, + nonce: 0, + from: pair.public(), + to: Default::default(), + }; + let signature = pair.sign(&transfer.encode()).into(); + Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first: true } + } + fn chain_event(header: B::Header) -> ChainEvent where NumberFor: From, @@ -553,7 +587,7 @@ mod tests { return value.1 } let old = value.1; - let new = old + time::Duration::from_secs(2); + let new = 
old + time::Duration::from_secs(1); *value = (true, new); old }), @@ -718,7 +752,7 @@ mod tests { ); // when - let deadline = time::Duration::from_secs(9); + let deadline = time::Duration::from_secs(900); let block = block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) .map(|r| r.block) @@ -726,8 +760,8 @@ mod tests { // then // block should have some extrinsics although we have some more in the pool. - assert_eq!(block.extrinsics().len(), expected_block_extrinsics); assert_eq!(txpool.ready().count(), expected_pool_transactions); + assert_eq!(block.extrinsics().len(), expected_block_extrinsics); block }; @@ -740,6 +774,7 @@ mod tests { .expect("there should be header"), )), ); + assert_eq!(txpool.ready().count(), 7); // let's create one block and import it let block = propose_block(&client, 0, 2, 7); @@ -753,6 +788,7 @@ mod tests { .expect("there should be header"), )), ); + assert_eq!(txpool.ready().count(), 5); // now let's make sure that we can still make some progress let block = propose_block(&client, 1, 2, 5); @@ -845,4 +881,142 @@ mod tests { // block size and thus, one less transaction should fit into the limit. assert_eq!(block.extrinsics().len(), extrinsics_num - 2); } + + #[test] + fn should_keep_adding_transactions_after_exhausts_resources_before_soft_deadline() { + // given + let client = Arc::new(substrate_test_runtime_client::new()); + let spawner = sp_core::testing::TaskExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + true.into(), + None, + spawner.clone(), + client.clone(), + ); + + block_on( + txpool.submit_at( + &BlockId::number(0), + SOURCE, + // add 2 * MAX_SKIPPED_TRANSACTIONS that exhaust resources + (0..MAX_SKIPPED_TRANSACTIONS * 2) + .into_iter() + .map(|i| exhausts_resources_extrinsic_from(i)) + // and some transactions that are okay. 
+ .chain((0..MAX_SKIPPED_TRANSACTIONS).into_iter().map(|i| extrinsic(i as _))) + .collect(), + ), + ) + .unwrap(); + + block_on( + txpool.maintain(chain_event( + client + .header(&BlockId::Number(0u64)) + .expect("header get error") + .expect("there should be header"), + )), + ); + assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 3); + + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); + + let cell = Mutex::new(time::Instant::now()); + let proposer = proposer_factory.init_with_now( + &client.header(&BlockId::number(0)).unwrap().unwrap(), + Box::new(move || { + let mut value = cell.lock(); + let old = *value; + *value = old + time::Duration::from_secs(1); + old + }), + ); + + // when + // give it enough time so that deadline is never triggered. + let deadline = time::Duration::from_secs(900); + let block = + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .map(|r| r.block) + .unwrap(); + + // then block should have all non-exhaust resources extrinsics (+ the first one). + assert_eq!(block.extrinsics().len(), MAX_SKIPPED_TRANSACTIONS + 1); + } + + #[test] + fn should_only_skip_up_to_some_limit_after_soft_deadline() { + // given + let client = Arc::new(substrate_test_runtime_client::new()); + let spawner = sp_core::testing::TaskExecutor::new(); + let txpool = BasicPool::new_full( + Default::default(), + true.into(), + None, + spawner.clone(), + client.clone(), + ); + + block_on( + txpool.submit_at( + &BlockId::number(0), + SOURCE, + (0..MAX_SKIPPED_TRANSACTIONS + 2) + .into_iter() + .map(|i| exhausts_resources_extrinsic_from(i)) + // and some transactions that are okay. 
+ .chain((0..MAX_SKIPPED_TRANSACTIONS).into_iter().map(|i| extrinsic(i as _))) + .collect(), + ), + ) + .unwrap(); + + block_on( + txpool.maintain(chain_event( + client + .header(&BlockId::Number(0u64)) + .expect("header get error") + .expect("there should be header"), + )), + ); + assert_eq!(txpool.ready().count(), MAX_SKIPPED_TRANSACTIONS * 2 + 2); + + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); + + let deadline = time::Duration::from_secs(600); + let cell = Arc::new(Mutex::new((0, time::Instant::now()))); + let cell2 = cell.clone(); + let proposer = proposer_factory.init_with_now( + &client.header(&BlockId::number(0)).unwrap().unwrap(), + Box::new(move || { + let mut value = cell.lock(); + let (called, old) = *value; + // add time after deadline is calculated internally (hence 1) + let increase = if called == 1 { + // we start after the soft_deadline should have already been reached. + deadline / 2 + } else { + // but we make sure to never reach the actual deadline + time::Duration::from_millis(0) + }; + *value = (called + 1, old + increase); + old + }), + ); + + let block = + block_on(proposer.propose(Default::default(), Default::default(), deadline, None)) + .map(|r| r.block) + .unwrap(); + + // then the block should have no transactions despite some in the pool + assert_eq!(block.extrinsics().len(), 1); + assert!( + cell2.lock().0 > MAX_SKIPPED_TRANSACTIONS, + "Not enough calls to current time, which indicates the test might have ended because of deadline, not soft deadline" + ); + } } diff --git a/client/beefy/Cargo.toml b/client/beefy/Cargo.toml new file mode 100644 index 0000000000000..d4541288a6287 --- /dev/null +++ b/client/beefy/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "beefy-gadget" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +fnv = "1.0.6" +futures = "0.3" +log = "0.4" 
+parking_lot = "0.11" +thiserror = "1.0" +wasm-timer = "0.2.5" + +codec = { version = "2.2.0", package = "parity-scale-codec", features = ["derive"] } +prometheus = { version = "0.9.0", package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" } + +sp-api = { version = "4.0.0-dev", path = "../../primitives/api" } +sp-application-crypto = { version = "4.0.0-dev", path = "../../primitives/application-crypto" } +sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" } +sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-keystore = { version = "0.10.0-dev", path = "../../primitives/keystore" } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } + +sc-utils = { version = "4.0.0-dev", path = "../utils" } +sc-client-api = { version = "4.0.0-dev", path = "../api" } +sc-keystore = { version = "4.0.0-dev", path = "../keystore" } +sc-network = { version = "0.10.0-dev", path = "../network" } +sc-network-gossip = { version = "0.10.0-dev", path = "../network-gossip" } + +beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy" } + +[dev-dependencies] +sc-network-test = { version = "0.8.0", path = "../network/test" } + +strum = { version = "0.21", features = ["derive"] } diff --git a/client/beefy/rpc/Cargo.toml b/client/beefy/rpc/Cargo.toml new file mode 100644 index 0000000000000..8af2fa3eac867 --- /dev/null +++ b/client/beefy/rpc/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "beefy-gadget-rpc" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0-or-later WITH Classpath-exception-2.0" + +[dependencies] +futures = "0.3.16" +log = "0.4" +serde = { version = "1.0.130", features = ["derive"] } + +jsonrpc-core = "18.0.0" +jsonrpc-core-client = "18.0.0" +jsonrpc-derive = "18.0.0" +jsonrpc-pubsub = "18.0.0" + +codec = { version = "2.2.0", package = 
"parity-scale-codec", features = ["derive"] } + +sc-rpc = { version = "4.0.0-dev", path = "../../rpc" } + +sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } +sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } + +beefy-gadget = { version = "4.0.0-dev", path = "../." } +beefy-primitives = { version = "4.0.0-dev", path = "../../../primitives/beefy" } diff --git a/client/beefy/rpc/src/lib.rs b/client/beefy/rpc/src/lib.rs new file mode 100644 index 0000000000000..c9a09525569b8 --- /dev/null +++ b/client/beefy/rpc/src/lib.rs @@ -0,0 +1,114 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! RPC API for BEEFY. + +#![warn(missing_docs)] + +use std::sync::Arc; + +use sp_runtime::traits::Block as BlockT; + +use futures::{FutureExt, SinkExt, StreamExt}; +use jsonrpc_derive::rpc; +use jsonrpc_pubsub::{manager::SubscriptionManager, typed::Subscriber, SubscriptionId}; +use log::warn; + +use beefy_gadget::notification::BeefySignedCommitmentStream; + +mod notification; + +/// Provides RPC methods for interacting with BEEFY. +#[rpc] +pub trait BeefyApi { + /// RPC Metadata + type Metadata; + + /// Returns the block most recently finalized by BEEFY, alongside side its justification. 
+ #[pubsub( + subscription = "beefy_justifications", + subscribe, + name = "beefy_subscribeJustifications" + )] + fn subscribe_justifications( + &self, + metadata: Self::Metadata, + subscriber: Subscriber, + ); + + /// Unsubscribe from receiving notifications about recently finalized blocks. + #[pubsub( + subscription = "beefy_justifications", + unsubscribe, + name = "beefy_unsubscribeJustifications" + )] + fn unsubscribe_justifications( + &self, + metadata: Option, + id: SubscriptionId, + ) -> jsonrpc_core::Result; +} + +/// Implements the BeefyApi RPC trait for interacting with BEEFY. +pub struct BeefyRpcHandler { + signed_commitment_stream: BeefySignedCommitmentStream, + manager: SubscriptionManager, +} + +impl BeefyRpcHandler { + /// Creates a new BeefyRpcHandler instance. + pub fn new(signed_commitment_stream: BeefySignedCommitmentStream, executor: E) -> Self + where + E: futures::task::Spawn + Send + Sync + 'static, + { + let manager = SubscriptionManager::new(Arc::new(executor)); + Self { signed_commitment_stream, manager } + } +} + +impl BeefyApi for BeefyRpcHandler +where + Block: BlockT, +{ + type Metadata = sc_rpc::Metadata; + + fn subscribe_justifications( + &self, + _metadata: Self::Metadata, + subscriber: Subscriber, + ) { + let stream = self + .signed_commitment_stream + .subscribe() + .map(|x| Ok::<_, ()>(Ok(notification::SignedCommitment::new::(x)))); + + self.manager.add(subscriber, |sink| { + stream + .forward(sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e))) + .map(|_| ()) + }); + } + + fn unsubscribe_justifications( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> jsonrpc_core::Result { + Ok(self.manager.cancel(id)) + } +} diff --git a/client/beefy/rpc/src/notification.rs b/client/beefy/rpc/src/notification.rs new file mode 100644 index 0000000000000..4830d72905a98 --- /dev/null +++ b/client/beefy/rpc/src/notification.rs @@ -0,0 +1,39 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use codec::Encode; +use serde::{Deserialize, Serialize}; + +use sp_runtime::traits::Block as BlockT; + +/// An encoded signed commitment proving that the given header has been finalized. +/// The given bytes should be the SCALE-encoded representation of a +/// `beefy_primitives::SignedCommitment`. +#[derive(Clone, Serialize, Deserialize)] +pub struct SignedCommitment(sp_core::Bytes); + +impl SignedCommitment { + pub fn new( + signed_commitment: beefy_gadget::notification::SignedCommitment, + ) -> Self + where + Block: BlockT, + { + SignedCommitment(signed_commitment.encode().into()) + } +} diff --git a/client/beefy/src/error.rs b/client/beefy/src/error.rs new file mode 100644 index 0000000000000..db532d34c1e3b --- /dev/null +++ b/client/beefy/src/error.rs @@ -0,0 +1,31 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! BEEFY gadget specific errors +//! +//! Used for BEEFY gadget interal error handling only + +use std::fmt::Debug; + +#[derive(Debug, thiserror::Error, PartialEq)] +pub enum Error { + #[error("Keystore error: {0}")] + Keystore(String), + #[error("Signature error: {0}")] + Signature(String), +} diff --git a/client/beefy/src/gossip.rs b/client/beefy/src/gossip.rs new file mode 100644 index 0000000000000..d0199964b6ebf --- /dev/null +++ b/client/beefy/src/gossip.rs @@ -0,0 +1,236 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use std::{collections::BTreeMap, time::Duration}; + +use sc_network::PeerId; +use sc_network_gossip::{MessageIntent, ValidationResult, Validator, ValidatorContext}; +use sp_core::hashing::twox_64; +use sp_runtime::traits::{Block, Hash, Header, NumberFor}; + +use codec::{Decode, Encode}; +use log::{debug, trace}; +use parking_lot::{Mutex, RwLock}; +use wasm_timer::Instant; + +use beefy_primitives::{ + crypto::{Public, Signature}, + MmrRootHash, VoteMessage, +}; + +use crate::keystore::BeefyKeystore; + +#[cfg(test)] +#[path = "gossip_tests.rs"] +mod tests; + +// Limit BEEFY gossip by keeping only a bound number of voting rounds alive. +const MAX_LIVE_GOSSIP_ROUNDS: usize = 3; + +// Timeout for rebroadcasting messages. +const REBROADCAST_AFTER: Duration = Duration::from_secs(60 * 5); + +/// Gossip engine messages topic +pub(crate) fn topic() -> B::Hash +where + B: Block, +{ + <::Hashing as Hash>::hash(b"beefy") +} + +/// A type that represents hash of the message. +pub type MessageHash = [u8; 8]; + +type KnownVotes = BTreeMap, fnv::FnvHashSet>; + +/// BEEFY gossip validator +/// +/// Validate BEEFY gossip messages and limit the number of live BEEFY voting rounds. +/// +/// Allows messages from last [`MAX_LIVE_GOSSIP_ROUNDS`] to flow, everything else gets +/// rejected/expired. +/// +///All messaging is handled in a single BEEFY global topic. +pub(crate) struct GossipValidator +where + B: Block, +{ + topic: B::Hash, + known_votes: RwLock>, + next_rebroadcast: Mutex, +} + +impl GossipValidator +where + B: Block, +{ + pub fn new() -> GossipValidator { + GossipValidator { + topic: topic::(), + known_votes: RwLock::new(BTreeMap::new()), + next_rebroadcast: Mutex::new(Instant::now() + REBROADCAST_AFTER), + } + } + + /// Note a voting round. + /// + /// Noting `round` will keep `round` live. + /// + /// We retain the [`MAX_LIVE_GOSSIP_ROUNDS`] most **recent** voting rounds as live. + /// As long as a voting round is live, it will be gossiped to peer nodes. 
+ pub(crate) fn note_round(&self, round: NumberFor) { + debug!(target: "beefy", "🥩 About to note round #{}", round); + + let mut live = self.known_votes.write(); + + if !live.contains_key(&round) { + live.insert(round, Default::default()); + } + + if live.len() > MAX_LIVE_GOSSIP_ROUNDS { + let to_remove = live.iter().next().map(|x| x.0).copied(); + if let Some(first) = to_remove { + live.remove(&first); + } + } + } + + fn add_known(known_votes: &mut KnownVotes, round: &NumberFor, hash: MessageHash) { + known_votes.get_mut(round).map(|known| known.insert(hash)); + } + + // Note that we will always keep the most recent unseen round alive. + // + // This is a preliminary fix and the detailed description why we are + // doing this can be found as part of the issue below + // + // https://github.com/paritytech/grandpa-bridge-gadget/issues/237 + // + fn is_live(known_votes: &KnownVotes, round: &NumberFor) -> bool { + let unseen_round = if let Some(max_known_round) = known_votes.keys().last() { + round > max_known_round + } else { + known_votes.is_empty() + }; + + known_votes.contains_key(round) || unseen_round + } + + fn is_known(known_votes: &KnownVotes, round: &NumberFor, hash: &MessageHash) -> bool { + known_votes.get(round).map(|known| known.contains(hash)).unwrap_or(false) + } +} + +impl Validator for GossipValidator +where + B: Block, +{ + fn validate( + &self, + _context: &mut dyn ValidatorContext, + sender: &PeerId, + mut data: &[u8], + ) -> ValidationResult { + if let Ok(msg) = + VoteMessage::, Public, Signature>::decode(&mut data) + { + let msg_hash = twox_64(data); + let round = msg.commitment.block_number; + + // Verify general usefulness of the message. + // We are going to discard old votes right away (without verification) + // Also we keep track of already received votes to avoid verifying duplicates. 
+ { + let known_votes = self.known_votes.read(); + + if !GossipValidator::::is_live(&known_votes, &round) { + return ValidationResult::Discard + } + + if GossipValidator::::is_known(&known_votes, &round, &msg_hash) { + return ValidationResult::ProcessAndKeep(self.topic) + } + } + + if BeefyKeystore::verify(&msg.id, &msg.signature, &msg.commitment.encode()) { + GossipValidator::::add_known(&mut *self.known_votes.write(), &round, msg_hash); + return ValidationResult::ProcessAndKeep(self.topic) + } else { + // TODO: report peer + debug!(target: "beefy", "🥩 Bad signature on message: {:?}, from: {:?}", msg, sender); + } + } + + ValidationResult::Discard + } + + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + let known_votes = self.known_votes.read(); + Box::new(move |_topic, mut data| { + let msg = match VoteMessage::, Public, Signature>::decode( + &mut data, + ) { + Ok(vote) => vote, + Err(_) => return true, + }; + + let round = msg.commitment.block_number; + let expired = !GossipValidator::::is_live(&known_votes, &round); + + trace!(target: "beefy", "🥩 Message for round #{} expired: {}", round, expired); + + expired + }) + } + + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { + let do_rebroadcast = { + let now = Instant::now(); + let mut next_rebroadcast = self.next_rebroadcast.lock(); + if now >= *next_rebroadcast { + *next_rebroadcast = now + REBROADCAST_AFTER; + true + } else { + false + } + }; + + let known_votes = self.known_votes.read(); + Box::new(move |_who, intent, _topic, mut data| { + if let MessageIntent::PeriodicRebroadcast = intent { + return do_rebroadcast + } + + let msg = match VoteMessage::, Public, Signature>::decode( + &mut data, + ) { + Ok(vote) => vote, + Err(_) => return true, + }; + + let round = msg.commitment.block_number; + let allowed = GossipValidator::::is_live(&known_votes, &round); + + debug!(target: "beefy", "🥩 Message for round #{} allowed: {}", round, allowed); + + allowed + }) + } +} diff --git 
a/client/beefy/src/gossip_tests.rs b/client/beefy/src/gossip_tests.rs new file mode 100644 index 0000000000000..2d46b873cb7b0 --- /dev/null +++ b/client/beefy/src/gossip_tests.rs @@ -0,0 +1,182 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use sc_keystore::LocalKeystore; +use sc_network_test::Block; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + +use beefy_primitives::{crypto::Signature, Commitment, MmrRootHash, VoteMessage, KEY_TYPE}; + +use crate::keystore::{tests::Keyring, BeefyKeystore}; + +use super::*; + +#[test] +fn note_round_works() { + let gv = GossipValidator::::new(); + + gv.note_round(1u64); + + let live = gv.known_votes.read(); + assert!(GossipValidator::::is_live(&live, &1u64)); + + drop(live); + + gv.note_round(3u64); + gv.note_round(7u64); + gv.note_round(10u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(!GossipValidator::::is_live(&live, &1u64)); + assert!(GossipValidator::::is_live(&live, &3u64)); + assert!(GossipValidator::::is_live(&live, &7u64)); + assert!(GossipValidator::::is_live(&live, &10u64)); +} + +#[test] +fn keeps_most_recent_max_rounds() { + let gv = GossipValidator::::new(); + + gv.note_round(3u64); + gv.note_round(7u64); 
+ gv.note_round(10u64); + gv.note_round(1u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(GossipValidator::::is_live(&live, &3u64)); + assert!(!GossipValidator::::is_live(&live, &1u64)); + + drop(live); + + gv.note_round(23u64); + gv.note_round(15u64); + gv.note_round(20u64); + gv.note_round(2u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(GossipValidator::::is_live(&live, &15u64)); + assert!(GossipValidator::::is_live(&live, &20u64)); + assert!(GossipValidator::::is_live(&live, &23u64)); +} + +#[test] +fn note_same_round_twice() { + let gv = GossipValidator::::new(); + + gv.note_round(3u64); + gv.note_round(7u64); + gv.note_round(10u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + drop(live); + + // note round #7 again -> should not change anything + gv.note_round(7u64); + + let live = gv.known_votes.read(); + + assert_eq!(live.len(), MAX_LIVE_GOSSIP_ROUNDS); + + assert!(GossipValidator::::is_live(&live, &3u64)); + assert!(GossipValidator::::is_live(&live, &7u64)); + assert!(GossipValidator::::is_live(&live, &10u64)); +} + +struct TestContext; +impl ValidatorContext for TestContext { + fn broadcast_topic(&mut self, _topic: B::Hash, _force: bool) { + todo!() + } + + fn broadcast_message(&mut self, _topic: B::Hash, _message: Vec, _force: bool) { + todo!() + } + + fn send_message(&mut self, _who: &sc_network::PeerId, _message: Vec) { + todo!() + } + + fn send_topic(&mut self, _who: &sc_network::PeerId, _topic: B::Hash, _force: bool) { + todo!() + } +} + +fn sign_commitment( + who: &Keyring, + commitment: &Commitment, +) -> Signature { + let store: SyncCryptoStorePtr = std::sync::Arc::new(LocalKeystore::in_memory()); + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&who.to_seed())).unwrap(); + let beefy_keystore: BeefyKeystore = Some(store).into(); + + beefy_keystore.sign(&who.public(), 
&commitment.encode()).unwrap() +} + +#[test] +fn should_avoid_verifying_signatures_twice() { + let gv = GossipValidator::::new(); + let sender = sc_network::PeerId::random(); + let mut context = TestContext; + + let commitment = + Commitment { payload: MmrRootHash::default(), block_number: 3_u64, validator_set_id: 0 }; + + let signature = sign_commitment(&Keyring::Alice, &commitment); + + let vote = VoteMessage { commitment, id: Keyring::Alice.public(), signature }; + + gv.note_round(3u64); + gv.note_round(7u64); + gv.note_round(10u64); + + // first time the cache should be populated. + let res = gv.validate(&mut context, &sender, &vote.encode()); + + assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); + assert_eq!(gv.known_votes.read().get(&vote.commitment.block_number).map(|x| x.len()), Some(1)); + + // second time we should hit the cache + let res = gv.validate(&mut context, &sender, &vote.encode()); + + assert!(matches!(res, ValidationResult::ProcessAndKeep(_))); + + // next we should quickly reject if the round is not live. + gv.note_round(11_u64); + gv.note_round(12_u64); + + assert!(!GossipValidator::::is_live( + &*gv.known_votes.read(), + &vote.commitment.block_number + )); + + let res = gv.validate(&mut context, &sender, &vote.encode()); + + assert!(matches!(res, ValidationResult::Discard)); +} diff --git a/client/beefy/src/keystore.rs b/client/beefy/src/keystore.rs new file mode 100644 index 0000000000000..88618b8a5a140 --- /dev/null +++ b/client/beefy/src/keystore.rs @@ -0,0 +1,119 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::convert::{From, TryInto}; + +use sp_application_crypto::RuntimeAppPublic; +use sp_core::keccak_256; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + +use log::warn; + +use beefy_primitives::{ + crypto::{Public, Signature}, + KEY_TYPE, +}; + +use crate::error; + +#[cfg(test)] +#[path = "keystore_tests.rs"] +pub mod tests; + +/// A BEEFY specific keystore implemented as a `Newtype`. This is basically a +/// wrapper around [`sp_keystore::SyncCryptoStore`] and allows to customize +/// common cryptographic functionality. +pub(crate) struct BeefyKeystore(Option); + +impl BeefyKeystore { + /// Check if the keystore contains a private key for one of the public keys + /// contained in `keys`. A public key with a matching private key is known + /// as a local authority id. + /// + /// Return the public key for which we also do have a private key. If no + /// matching private key is found, `None` will be returned. + pub fn authority_id(&self, keys: &[Public]) -> Option { + let store = self.0.clone()?; + + // we do check for multiple private keys as a key store sanity check. + let public: Vec = keys + .iter() + .filter(|k| SyncCryptoStore::has_keys(&*store, &[(k.to_raw_vec(), KEY_TYPE)])) + .cloned() + .collect(); + + if public.len() > 1 { + warn!(target: "beefy", "🥩 Multiple private keys found for: {:?} ({})", public, public.len()); + } + + public.get(0).cloned() + } + + /// Sign `message` with the `public` key. + /// + /// Note that `message` usually will be pre-hashed before being signed. + /// + /// Return the message signature or an error in case of failure. 
+ pub fn sign(&self, public: &Public, message: &[u8]) -> Result { + let store = self.0.clone().ok_or_else(|| error::Error::Keystore("no Keystore".into()))?; + + let msg = keccak_256(message); + let public = public.as_ref(); + + let sig = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, public, &msg) + .map_err(|e| error::Error::Keystore(e.to_string()))? + .ok_or_else(|| error::Error::Signature("ecdsa_sign_prehashed() failed".to_string()))?; + + // check that `sig` has the expected result type + let sig = sig.clone().try_into().map_err(|_| { + error::Error::Signature(format!("invalid signature {:?} for key {:?}", sig, public)) + })?; + + Ok(sig) + } + + /// Returns a vector of [`beefy_primitives::crypto::Public`] keys which are currently supported + /// (i.e. found in the keystore). + pub fn public_keys(&self) -> Result, error::Error> { + let store = self.0.clone().ok_or_else(|| error::Error::Keystore("no Keystore".into()))?; + + let pk: Vec = SyncCryptoStore::ecdsa_public_keys(&*store, KEY_TYPE) + .iter() + .map(|k| Public::from(k.clone())) + .collect(); + + Ok(pk) + } + + /// Use the `public` key to verify that `sig` is a valid signature for `message`. + /// + /// Return `true` if the signature is authentic, `false` otherwise. + pub fn verify(public: &Public, sig: &Signature, message: &[u8]) -> bool { + let msg = keccak_256(message); + let sig = sig.as_ref(); + let public = public.as_ref(); + + sp_core::ecdsa::Pair::verify_prehashed(sig, &msg, public) + } +} + +impl From> for BeefyKeystore { + fn from(store: Option) -> BeefyKeystore { + BeefyKeystore(store) + } +} diff --git a/client/beefy/src/keystore_tests.rs b/client/beefy/src/keystore_tests.rs new file mode 100644 index 0000000000000..99e3e42228df2 --- /dev/null +++ b/client/beefy/src/keystore_tests.rs @@ -0,0 +1,275 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::Arc; + +use sc_keystore::LocalKeystore; +use sp_core::{ecdsa, keccak_256, Pair}; +use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; + +use beefy_primitives::{crypto, KEY_TYPE}; + +use super::BeefyKeystore; +use crate::error::Error; + +/// Set of test accounts using [`beefy_primitives::crypto`] types. +#[allow(missing_docs)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, strum::Display, strum::EnumIter)] +pub(crate) enum Keyring { + Alice, + Bob, + Charlie, + Dave, + Eve, + Ferdie, + One, + Two, +} + +impl Keyring { + /// Sign `msg`. + pub fn sign(self, msg: &[u8]) -> crypto::Signature { + let msg = keccak_256(msg); + ecdsa::Pair::from(self).sign_prehashed(&msg).into() + } + + /// Return key pair. + pub fn pair(self) -> crypto::Pair { + ecdsa::Pair::from_string(self.to_seed().as_str(), None).unwrap().into() + } + + /// Return public key. + pub fn public(self) -> crypto::Public { + self.pair().public() + } + + /// Return seed string. 
+ pub fn to_seed(self) -> String { + format!("//{}", self) + } +} + +impl From for crypto::Pair { + fn from(k: Keyring) -> Self { + k.pair() + } +} + +impl From for ecdsa::Pair { + fn from(k: Keyring) -> Self { + k.pair().into() + } +} + +fn keystore() -> SyncCryptoStorePtr { + Arc::new(LocalKeystore::in_memory()) +} + +#[test] +fn verify_should_work() { + let msg = keccak_256(b"I am Alice!"); + let sig = Keyring::Alice.sign(b"I am Alice!"); + + assert!(ecdsa::Pair::verify_prehashed( + &sig.clone().into(), + &msg, + &Keyring::Alice.public().into(), + )); + + // different public key -> fail + assert!(!ecdsa::Pair::verify_prehashed( + &sig.clone().into(), + &msg, + &Keyring::Bob.public().into(), + )); + + let msg = keccak_256(b"I am not Alice!"); + + // different msg -> fail + assert!(!ecdsa::Pair::verify_prehashed(&sig.into(), &msg, &Keyring::Alice.public().into(),)); +} + +#[test] +fn pair_works() { + let want = crypto::Pair::from_string("//Alice", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Alice.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Bob", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Bob.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Charlie", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Charlie.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Dave", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Dave.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Eve", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Eve.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Ferdie", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Ferdie.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//One", None).expect("Pair 
failed").to_raw_vec(); + let got = Keyring::One.pair().to_raw_vec(); + assert_eq!(want, got); + + let want = crypto::Pair::from_string("//Two", None).expect("Pair failed").to_raw_vec(); + let got = Keyring::Two.pair().to_raw_vec(); + assert_eq!(want, got); +} + +#[test] +fn authority_id_works() { + let store = keystore(); + + let alice: crypto::Public = + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); + + let bob = Keyring::Bob.public(); + let charlie = Keyring::Charlie.public(); + + let store: BeefyKeystore = Some(store).into(); + + let mut keys = vec![bob, charlie]; + + let id = store.authority_id(keys.as_slice()); + assert!(id.is_none()); + + keys.push(alice.clone()); + + let id = store.authority_id(keys.as_slice()).unwrap(); + assert_eq!(id, alice); +} + +#[test] +fn sign_works() { + let store = keystore(); + + let alice: crypto::Public = + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); + + let store: BeefyKeystore = Some(store).into(); + + let msg = b"are you involved or commited?"; + + let sig1 = store.sign(&alice, msg).unwrap(); + let sig2 = Keyring::Alice.sign(msg); + + assert_eq!(sig1, sig2); +} + +#[test] +fn sign_error() { + let store = keystore(); + + let _ = SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Bob.to_seed())) + .ok() + .unwrap(); + + let store: BeefyKeystore = Some(store).into(); + + let alice = Keyring::Alice.public(); + + let msg = b"are you involved or commited?"; + let sig = store.sign(&alice, msg).err().unwrap(); + let err = Error::Signature("ecdsa_sign_prehashed() failed".to_string()); + + assert_eq!(sig, err); +} + +#[test] +fn sign_no_keystore() { + let store: BeefyKeystore = None.into(); + + let alice = Keyring::Alice.public(); + let msg = b"are you involved or commited"; + + let sig = store.sign(&alice, msg).err().unwrap(); + let err = Error::Keystore("no 
Keystore".to_string()); + assert_eq!(sig, err); +} + +#[test] +fn verify_works() { + let store = keystore(); + + let alice: crypto::Public = + SyncCryptoStore::ecdsa_generate_new(&*store, KEY_TYPE, Some(&Keyring::Alice.to_seed())) + .ok() + .unwrap() + .into(); + + let store: BeefyKeystore = Some(store).into(); + + // `msg` and `sig` match + let msg = b"are you involved or commited?"; + let sig = store.sign(&alice, msg).unwrap(); + assert!(BeefyKeystore::verify(&alice, &sig, msg)); + + // `msg and `sig` don't match + let msg = b"you are just involved"; + assert!(!BeefyKeystore::verify(&alice, &sig, msg)); +} + +// Note that we use keys with and without a seed for this test. +#[test] +fn public_keys_works() { + const TEST_TYPE: sp_application_crypto::KeyTypeId = sp_application_crypto::KeyTypeId(*b"test"); + + let store = keystore(); + + let add_key = |key_type, seed: Option<&str>| { + SyncCryptoStore::ecdsa_generate_new(&*store, key_type, seed).unwrap() + }; + + // test keys + let _ = add_key(TEST_TYPE, Some(Keyring::Alice.to_seed().as_str())); + let _ = add_key(TEST_TYPE, Some(Keyring::Bob.to_seed().as_str())); + + let _ = add_key(TEST_TYPE, None); + let _ = add_key(TEST_TYPE, None); + + // BEEFY keys + let _ = add_key(KEY_TYPE, Some(Keyring::Dave.to_seed().as_str())); + let _ = add_key(KEY_TYPE, Some(Keyring::Eve.to_seed().as_str())); + + let key1: crypto::Public = add_key(KEY_TYPE, None).into(); + let key2: crypto::Public = add_key(KEY_TYPE, None).into(); + + let store: BeefyKeystore = Some(store).into(); + + let keys = store.public_keys().ok().unwrap(); + + assert!(keys.len() == 4); + assert!(keys.contains(&Keyring::Dave.public())); + assert!(keys.contains(&Keyring::Eve.public())); + assert!(keys.contains(&key1)); + assert!(keys.contains(&key2)); +} diff --git a/client/beefy/src/lib.rs b/client/beefy/src/lib.rs new file mode 100644 index 0000000000000..b2372b2a6c518 --- /dev/null +++ b/client/beefy/src/lib.rs @@ -0,0 +1,159 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::Arc; + +use log::debug; +use prometheus::Registry; + +use sc_client_api::{Backend, BlockchainEvents, Finalizer}; +use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; + +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_keystore::SyncCryptoStorePtr; +use sp_runtime::traits::Block; + +use beefy_primitives::BeefyApi; + +mod error; +mod gossip; +mod keystore; +mod metrics; +mod round; +mod worker; + +pub mod notification; + +pub const BEEFY_PROTOCOL_NAME: &str = "/paritytech/beefy/1"; + +/// Returns the configuration value to put in +/// [`sc_network::config::NetworkConfiguration::extra_sets`]. +pub fn beefy_peers_set_config() -> sc_network::config::NonDefaultSetConfig { + let mut cfg = + sc_network::config::NonDefaultSetConfig::new(BEEFY_PROTOCOL_NAME.into(), 1024 * 1024); + cfg.allow_non_reserved(25, 25); + cfg +} + +/// A convenience BEEFY client trait that defines all the type bounds a BEEFY client +/// has to satisfy. Ideally that should actually be a trait alias. Unfortunately as +/// of today, Rust does not allow a type alias to be used as a trait bound. Tracking +/// issue is . 
+pub trait Client: + BlockchainEvents + HeaderBackend + Finalizer + ProvideRuntimeApi + Send + Sync +where + B: Block, + BE: Backend, +{ + // empty +} + +impl Client for T +where + B: Block, + BE: Backend, + T: BlockchainEvents + + HeaderBackend + + Finalizer + + ProvideRuntimeApi + + Send + + Sync, +{ + // empty +} + +/// BEEFY gadget initialization parameters. +pub struct BeefyParams +where + B: Block, + BE: Backend, + C: Client, + C::Api: BeefyApi, + N: GossipNetwork + Clone + Send + 'static, +{ + /// BEEFY client + pub client: Arc, + /// Client Backend + pub backend: Arc, + /// Local key store + pub key_store: Option, + /// Gossip network + pub network: N, + /// BEEFY signed commitment sender + pub signed_commitment_sender: notification::BeefySignedCommitmentSender, + /// Minimal delta between blocks, BEEFY should vote for + pub min_block_delta: u32, + /// Prometheus metric registry + pub prometheus_registry: Option, +} + +/// Start the BEEFY gadget. +/// +/// This is a thin shim around running and awaiting a BEEFY worker. 
+pub async fn start_beefy_gadget(beefy_params: BeefyParams) +where + B: Block, + BE: Backend, + C: Client, + C::Api: BeefyApi, + N: GossipNetwork + Clone + Send + 'static, +{ + let BeefyParams { + client, + backend, + key_store, + network, + signed_commitment_sender, + min_block_delta, + prometheus_registry, + } = beefy_params; + + let gossip_validator = Arc::new(gossip::GossipValidator::new()); + let gossip_engine = + GossipEngine::new(network, BEEFY_PROTOCOL_NAME, gossip_validator.clone(), None); + + let metrics = + prometheus_registry.as_ref().map(metrics::Metrics::register).and_then( + |result| match result { + Ok(metrics) => { + debug!(target: "beefy", "🥩 Registered metrics"); + Some(metrics) + }, + Err(err) => { + debug!(target: "beefy", "🥩 Failed to register metrics: {:?}", err); + None + }, + }, + ); + + let worker_params = worker::WorkerParams { + client, + backend, + key_store: key_store.into(), + signed_commitment_sender, + gossip_engine, + gossip_validator, + min_block_delta, + metrics, + }; + + let worker = worker::BeefyWorker::<_, _, _>::new(worker_params); + + worker.run().await +} diff --git a/client/beefy/src/metrics.rs b/client/beefy/src/metrics.rs new file mode 100644 index 0000000000000..0fdc29f97c37a --- /dev/null +++ b/client/beefy/src/metrics.rs @@ -0,0 +1,93 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! BEEFY Prometheus metrics definition + +use prometheus::{register, Counter, Gauge, PrometheusError, Registry, U64}; + +/// BEEFY metrics exposed through Prometheus +pub(crate) struct Metrics { + /// Current active validator set id + pub beefy_validator_set_id: Gauge, + /// Total number of votes sent by this node + pub beefy_votes_sent: Counter, + /// Most recent concluded voting round + pub beefy_round_concluded: Gauge, + /// Best block finalized by BEEFY + pub beefy_best_block: Gauge, + /// Next block BEEFY should vote on + pub beefy_should_vote_on: Gauge, + /// Number of sessions without a signed commitment + pub beefy_skipped_sessions: Counter, +} + +impl Metrics { + pub(crate) fn register(registry: &Registry) -> Result { + Ok(Self { + beefy_validator_set_id: register( + Gauge::new("beefy_validator_set_id", "Current BEEFY active validator set id.")?, + registry, + )?, + beefy_votes_sent: register( + Counter::new("beefy_votes_sent", "Number of votes sent by this node")?, + registry, + )?, + beefy_round_concluded: register( + Gauge::new("beefy_round_concluded", "Voting round, that has been concluded")?, + registry, + )?, + beefy_best_block: register( + Gauge::new("beefy_best_block", "Best block finalized by BEEFY")?, + registry, + )?, + beefy_should_vote_on: register( + Gauge::new("beefy_should_vote_on", "Next block, BEEFY should vote on")?, + registry, + )?, + beefy_skipped_sessions: register( + Counter::new( + "beefy_skipped_sessions", + "Number of sessions without a signed commitment", + )?, + registry, + )?, + }) + } +} + +// Note: we use the `format` macro to convert an expr into a `u64`. This will fail, +// if expr does not derive `Display`. +#[macro_export] +macro_rules! 
metric_set { + ($self:ident, $m:ident, $v:expr) => {{ + let val: u64 = format!("{}", $v).parse().unwrap(); + + if let Some(metrics) = $self.metrics.as_ref() { + metrics.$m.set(val); + } + }}; +} + +#[macro_export] +macro_rules! metric_inc { + ($self:ident, $m:ident) => {{ + if let Some(metrics) = $self.metrics.as_ref() { + metrics.$m.inc(); + } + }}; +} diff --git a/client/beefy/src/notification.rs b/client/beefy/src/notification.rs new file mode 100644 index 0000000000000..6099c9681447b --- /dev/null +++ b/client/beefy/src/notification.rs @@ -0,0 +1,113 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use std::sync::Arc; + +use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_runtime::traits::{Block, NumberFor}; + +use parking_lot::Mutex; + +/// Stream of signed commitments returned when subscribing. +pub type SignedCommitment = + beefy_primitives::SignedCommitment, beefy_primitives::MmrRootHash>; + +/// Stream of signed commitments returned when subscribing. +type SignedCommitmentStream = TracingUnboundedReceiver>; + +/// Sending endpoint for notifying about signed commitments. 
+type SignedCommitmentSender = TracingUnboundedSender>; + +/// Collection of channel sending endpoints shared with the receiver side so they can register +/// themselves. +type SharedSignedCommitmentSenders = Arc>>>; + +/// The sending half of the signed commitment channel(s). +/// +/// Used to send notifications about signed commitments generated at the end of a BEEFY round. +#[derive(Clone)] +pub struct BeefySignedCommitmentSender +where + B: Block, +{ + subscribers: SharedSignedCommitmentSenders, +} + +impl BeefySignedCommitmentSender +where + B: Block, +{ + /// The `subscribers` should be shared with a corresponding `SignedCommitmentSender`. + fn new(subscribers: SharedSignedCommitmentSenders) -> Self { + Self { subscribers } + } + + /// Send out a notification to all subscribers that a new signed commitment is available for a + /// block. + pub fn notify(&self, signed_commitment: SignedCommitment) { + let mut subscribers = self.subscribers.lock(); + + // do an initial prune on closed subscriptions + subscribers.retain(|n| !n.is_closed()); + + if !subscribers.is_empty() { + subscribers.retain(|n| n.unbounded_send(signed_commitment.clone()).is_ok()); + } + } +} + +/// The receiving half of the signed commitments channel. +/// +/// Used to receive notifications about signed commitments generated at the end of a BEEFY round. +/// The `BeefySignedCommitmentStream` entity stores the `SharedSignedCommitmentSenders` so it can be +/// used to add more subscriptions. +#[derive(Clone)] +pub struct BeefySignedCommitmentStream +where + B: Block, +{ + subscribers: SharedSignedCommitmentSenders, +} + +impl BeefySignedCommitmentStream +where + B: Block, +{ + /// Creates a new pair of receiver and sender of signed commitment notifications. 
+ pub fn channel() -> (BeefySignedCommitmentSender, Self) { + let subscribers = Arc::new(Mutex::new(vec![])); + let receiver = BeefySignedCommitmentStream::new(subscribers.clone()); + let sender = BeefySignedCommitmentSender::new(subscribers); + (sender, receiver) + } + + /// Create a new receiver of signed commitment notifications. + /// + /// The `subscribers` should be shared with a corresponding `BeefySignedCommitmentSender`. + fn new(subscribers: SharedSignedCommitmentSenders) -> Self { + Self { subscribers } + } + + /// Subscribe to a channel through which signed commitments are sent at the end of each BEEFY + /// voting round. + pub fn subscribe(&self) -> SignedCommitmentStream { + let (sender, receiver) = tracing_unbounded("mpsc_signed_commitments_notification_stream"); + self.subscribers.lock().push(sender); + receiver + } +} diff --git a/client/beefy/src/round.rs b/client/beefy/src/round.rs new file mode 100644 index 0000000000000..7d443603b364e --- /dev/null +++ b/client/beefy/src/round.rs @@ -0,0 +1,121 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+
+use std::{collections::BTreeMap, hash::Hash};
+
+use log::{debug, trace};
+
+use beefy_primitives::{
+	crypto::{Public, Signature},
+	ValidatorSet, ValidatorSetId,
+};
+use sp_arithmetic::traits::AtLeast32BitUnsigned;
+use sp_runtime::traits::MaybeDisplay;
+
+/// Tracks the votes received for a single BEEFY round.
+#[derive(Default)]
+struct RoundTracker {
+	// (voter, signature) pairs collected so far; duplicates are rejected on insert
+	votes: Vec<(Public, Signature)>,
+}
+
+impl RoundTracker {
+	/// Add `vote` to the tracker. Returns `false` if the identical vote was
+	/// already recorded, `true` otherwise.
+	fn add_vote(&mut self, vote: (Public, Signature)) -> bool {
+		// this needs to handle equivocations in the future
+		if self.votes.contains(&vote) {
+			return false
+		}
+
+		self.votes.push(vote);
+		true
+	}
+
+	/// `true` once at least `threshold` votes have been collected.
+	fn is_done(&self, threshold: usize) -> bool {
+		self.votes.len() >= threshold
+	}
+}
+
+/// Minimum number of honest votes required out of `authorities` total,
+/// tolerating up to one third faulty validators (2f + 1 out of 3f + 1).
+fn threshold(authorities: usize) -> usize {
+	let faulty = authorities.saturating_sub(1) / 3;
+	authorities - faulty
+}
+
+/// Keeps track of all currently open voting rounds, keyed by (payload hash,
+/// block number), for a fixed validator set.
+pub(crate) struct Rounds<Hash, Number> {
+	rounds: BTreeMap<(Hash, Number), RoundTracker>,
+	validator_set: ValidatorSet<Public>,
+}
+
+impl<H, N> Rounds<H, N>
+where
+	H: Ord + Hash,
+	N: Ord + AtLeast32BitUnsigned + MaybeDisplay,
+{
+	/// Create an empty round store for `validator_set`.
+	pub(crate) fn new(validator_set: ValidatorSet<Public>) -> Self {
+		Rounds { rounds: BTreeMap::new(), validator_set }
+	}
+}
+
+impl<H, N> Rounds<H, N>
+where
+	H: Ord + Hash,
+	N: Ord + AtLeast32BitUnsigned + MaybeDisplay,
+{
+	/// Id of the validator set these rounds are tracked for.
+	pub(crate) fn validator_set_id(&self) -> ValidatorSetId {
+		self.validator_set.id
+	}
+
+	/// The public keys of the current validator set.
+	pub(crate) fn validators(&self) -> Vec<Public> {
+		self.validator_set.validators.clone()
+	}
+
+	/// Record `vote` for `round`, creating the round tracker on first vote.
+	/// Returns `false` for an exact duplicate vote.
+	pub(crate) fn add_vote(&mut self, round: (H, N), vote: (Public, Signature)) -> bool {
+		self.rounds.entry(round).or_default().add_vote(vote)
+	}
+
+	/// `true` once `round` has reached the 2/3+1 vote threshold.
+	/// An unknown round is reported as not done.
+	pub(crate) fn is_done(&self, round: &(H, N)) -> bool {
+		let done = self
+			.rounds
+			.get(round)
+			.map(|tracker| tracker.is_done(threshold(self.validator_set.validators.len())))
+			.unwrap_or(false);
+
+		debug!(target: "beefy", "🥩 Round #{} done: {}", round.1, done);
+
+		done
+	}
+
+	/// Remove `round` and return its signatures ordered by validator set index
+	/// (`None` for validators that did not vote). Returns `None` if the round
+	/// is unknown.
+	pub(crate) fn drop(&mut self, round: &(H, N)) -> Option<Vec<Option<Signature>>> {
+		trace!(target: "beefy", "🥩 About to drop round #{}", round.1);
+
+		let signatures = self.rounds.remove(round)?.votes;
+
+		Some(
+			self.validator_set
+				.validators
+				.iter()
+				.map(|authority_id| {
+					signatures.iter().find_map(|(id, sig)| {
+						if id == authority_id {
+							Some(sig.clone())
+						} else {
+							None
+						}
+					})
+				})
+				.collect(),
+		)
+	}
+}
diff --git a/client/beefy/src/worker.rs b/client/beefy/src/worker.rs
new file mode 100644
index 0000000000000..3f52686930332
--- /dev/null
+++ b/client/beefy/src/worker.rs
@@ -0,0 +1,534 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2021 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+ +use std::{collections::BTreeSet, fmt::Debug, marker::PhantomData, sync::Arc}; + +use codec::{Codec, Decode, Encode}; +use futures::{future, FutureExt, StreamExt}; +use log::{debug, error, info, trace, warn}; +use parking_lot::Mutex; + +use sc_client_api::{Backend, FinalityNotification, FinalityNotifications}; +use sc_network_gossip::GossipEngine; + +use sp_api::BlockId; +use sp_arithmetic::traits::AtLeast32Bit; +use sp_runtime::{ + generic::OpaqueDigestItemId, + traits::{Block, Header, NumberFor}, + SaturatedConversion, +}; + +use beefy_primitives::{ + crypto::{AuthorityId, Public, Signature}, + BeefyApi, Commitment, ConsensusLog, MmrRootHash, SignedCommitment, ValidatorSet, + VersionedCommitment, VoteMessage, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, +}; + +use crate::{ + error, + gossip::{topic, GossipValidator}, + keystore::BeefyKeystore, + metric_inc, metric_set, + metrics::Metrics, + notification, round, Client, +}; + +pub(crate) struct WorkerParams +where + B: Block, +{ + pub client: Arc, + pub backend: Arc, + pub key_store: BeefyKeystore, + pub signed_commitment_sender: notification::BeefySignedCommitmentSender, + pub gossip_engine: GossipEngine, + pub gossip_validator: Arc>, + pub min_block_delta: u32, + pub metrics: Option, +} + +/// A BEEFY worker plays the BEEFY protocol +pub(crate) struct BeefyWorker +where + B: Block, + BE: Backend, + C: Client, +{ + client: Arc, + backend: Arc, + key_store: BeefyKeystore, + signed_commitment_sender: notification::BeefySignedCommitmentSender, + gossip_engine: Arc>>, + gossip_validator: Arc>, + /// Min delta in block numbers between two blocks, BEEFY should vote on + min_block_delta: u32, + metrics: Option, + rounds: round::Rounds>, + finality_notifications: FinalityNotifications, + /// Best block we received a GRANDPA notification for + best_grandpa_block: NumberFor, + /// Best block a BEEFY voting round has been concluded for + best_beefy_block: Option>, + /// Validator set id for the last signed commitment + 
last_signed_id: u64, + // keep rustc happy + _backend: PhantomData, +} + +impl BeefyWorker +where + B: Block + Codec, + BE: Backend, + C: Client, + C::Api: BeefyApi, +{ + /// Return a new BEEFY worker instance. + /// + /// Note that a BEEFY worker is only fully functional if a corresponding + /// BEEFY pallet has been deployed on-chain. + /// + /// The BEEFY pallet is needed in order to keep track of the BEEFY authority set. + pub(crate) fn new(worker_params: WorkerParams) -> Self { + let WorkerParams { + client, + backend, + key_store, + signed_commitment_sender, + gossip_engine, + gossip_validator, + min_block_delta, + metrics, + } = worker_params; + + BeefyWorker { + client: client.clone(), + backend, + key_store, + signed_commitment_sender, + gossip_engine: Arc::new(Mutex::new(gossip_engine)), + gossip_validator, + min_block_delta, + metrics, + rounds: round::Rounds::new(ValidatorSet::empty()), + finality_notifications: client.finality_notification_stream(), + best_grandpa_block: client.info().finalized_number, + best_beefy_block: None, + last_signed_id: 0, + _backend: PhantomData, + } + } +} + +impl BeefyWorker +where + B: Block, + BE: Backend, + C: Client, + C::Api: BeefyApi, +{ + /// Return `true`, if we should vote on block `number` + fn should_vote_on(&self, number: NumberFor) -> bool { + let best_beefy_block = if let Some(block) = self.best_beefy_block { + block + } else { + debug!(target: "beefy", "🥩 Missing best BEEFY block - won't vote for: {:?}", number); + return false + }; + + let target = vote_target(self.best_grandpa_block, best_beefy_block, self.min_block_delta); + + trace!(target: "beefy", "🥩 should_vote_on: #{:?}, next_block_to_vote_on: #{:?}", number, target); + + metric_set!(self, beefy_should_vote_on, target); + + number == target + } + + /// Return the current active validator set at header `header`. + /// + /// Note that the validator set could be `None`. 
This is the case if we don't find + /// a BEEFY authority set change and we can't fetch the authority set from the + /// BEEFY on-chain state. + /// + /// Such a failure is usually an indication that the BEEFY pallet has not been deployed (yet). + fn validator_set(&self, header: &B::Header) -> Option> { + let new = if let Some(new) = find_authorities_change::(header) { + Some(new) + } else { + let at = BlockId::hash(header.hash()); + self.client.runtime_api().validator_set(&at).ok() + }; + + trace!(target: "beefy", "🥩 active validator set: {:?}", new); + + new + } + + /// Verify `active` validator set for `block` against the key store + /// + /// The critical case is, if we do have a public key in the key store which is not + /// part of the active validator set. + /// + /// Note that for a non-authority node there will be no keystore, and we will + /// return an error and don't check. The error can usually be ignored. + fn verify_validator_set( + &self, + block: &NumberFor, + mut active: ValidatorSet, + ) -> Result<(), error::Error> { + let active: BTreeSet = active.validators.drain(..).collect(); + + let store: BTreeSet = self.key_store.public_keys()?.drain(..).collect(); + + let missing: Vec<_> = store.difference(&active).cloned().collect(); + + if !missing.is_empty() { + debug!(target: "beefy", "🥩 for block {:?} public key missing in validator set: {:?}", block, missing); + } + + Ok(()) + } + + fn handle_finality_notification(&mut self, notification: FinalityNotification) { + trace!(target: "beefy", "🥩 Finality notification: {:?}", notification); + + // update best GRANDPA finalized block we have seen + self.best_grandpa_block = *notification.header.number(); + + if let Some(active) = self.validator_set(¬ification.header) { + // Authority set change or genesis set id triggers new voting rounds + // + // TODO: (adoerr) Enacting a new authority set will also implicitly 'conclude' + // the currently active BEEFY voting round by starting a new one. 
This is + // temporary and needs to be replaced by proper round life cycle handling. + if active.id != self.rounds.validator_set_id() || + (active.id == GENESIS_AUTHORITY_SET_ID && self.best_beefy_block.is_none()) + { + debug!(target: "beefy", "🥩 New active validator set id: {:?}", active); + metric_set!(self, beefy_validator_set_id, active.id); + + // BEEFY should produce a signed commitment for each session + if active.id != self.last_signed_id + 1 && active.id != GENESIS_AUTHORITY_SET_ID { + metric_inc!(self, beefy_skipped_sessions); + } + + // verify the new validator set + let _ = self.verify_validator_set(notification.header.number(), active.clone()); + + self.rounds = round::Rounds::new(active.clone()); + + debug!(target: "beefy", "🥩 New Rounds for id: {:?}", active.id); + + self.best_beefy_block = Some(*notification.header.number()); + + // this metric is kind of 'fake'. Best BEEFY block should only be updated once we + // have a signed commitment for the block. Remove once the above TODO is done. 
+ metric_set!(self, beefy_best_block, *notification.header.number()); + } + } + + if self.should_vote_on(*notification.header.number()) { + let authority_id = if let Some(id) = + self.key_store.authority_id(self.rounds.validators().as_slice()) + { + debug!(target: "beefy", "🥩 Local authority id: {:?}", id); + id + } else { + debug!(target: "beefy", "🥩 Missing validator id - can't vote for: {:?}", notification.header.hash()); + return + }; + + let mmr_root = + if let Some(hash) = find_mmr_root_digest::(¬ification.header) { + hash + } else { + warn!(target: "beefy", "🥩 No MMR root digest found for: {:?}", notification.header.hash()); + return + }; + + let commitment = Commitment { + payload: mmr_root, + block_number: notification.header.number(), + validator_set_id: self.rounds.validator_set_id(), + }; + let encoded_commitment = commitment.encode(); + + let signature = match self.key_store.sign(&authority_id, &*encoded_commitment) { + Ok(sig) => sig, + Err(err) => { + warn!(target: "beefy", "🥩 Error signing commitment: {:?}", err); + return + }, + }; + + trace!( + target: "beefy", + "🥩 Produced signature using {:?}, is_valid: {:?}", + authority_id, + BeefyKeystore::verify(&authority_id, &signature, &*encoded_commitment) + ); + + let message = VoteMessage { commitment, id: authority_id, signature }; + + let encoded_message = message.encode(); + + metric_inc!(self, beefy_votes_sent); + + debug!(target: "beefy", "🥩 Sent vote message: {:?}", message); + + self.handle_vote( + (message.commitment.payload, *message.commitment.block_number), + (message.id, message.signature), + ); + + self.gossip_engine.lock().gossip_message(topic::(), encoded_message, false); + } + } + + fn handle_vote(&mut self, round: (MmrRootHash, NumberFor), vote: (Public, Signature)) { + self.gossip_validator.note_round(round.1); + + let vote_added = self.rounds.add_vote(round, vote); + + if vote_added && self.rounds.is_done(&round) { + if let Some(signatures) = self.rounds.drop(&round) { + // id is 
stored for skipped session metric calculation + self.last_signed_id = self.rounds.validator_set_id(); + + let commitment = Commitment { + payload: round.0, + block_number: round.1, + validator_set_id: self.last_signed_id, + }; + + let signed_commitment = SignedCommitment { commitment, signatures }; + + metric_set!(self, beefy_round_concluded, round.1); + + info!(target: "beefy", "🥩 Round #{} concluded, committed: {:?}.", round.1, signed_commitment); + + if self + .backend + .append_justification( + BlockId::Number(round.1), + ( + BEEFY_ENGINE_ID, + VersionedCommitment::V1(signed_commitment.clone()).encode(), + ), + ) + .is_err() + { + // just a trace, because until the round lifecycle is improved, we will + // conclude certain rounds multiple times. + trace!(target: "beefy", "🥩 Failed to append justification: {:?}", signed_commitment); + } + + self.signed_commitment_sender.notify(signed_commitment); + self.best_beefy_block = Some(round.1); + + metric_set!(self, beefy_best_block, round.1); + } + } + } + + pub(crate) async fn run(mut self) { + let mut votes = Box::pin(self.gossip_engine.lock().messages_for(topic::()).filter_map( + |notification| async move { + debug!(target: "beefy", "🥩 Got vote message: {:?}", notification); + + VoteMessage::, Public, Signature>::decode( + &mut ¬ification.message[..], + ) + .ok() + }, + )); + + loop { + let engine = self.gossip_engine.clone(); + let gossip_engine = future::poll_fn(|cx| engine.lock().poll_unpin(cx)); + + futures::select! 
{ + notification = self.finality_notifications.next().fuse() => { + if let Some(notification) = notification { + self.handle_finality_notification(notification); + } else { + return; + } + }, + vote = votes.next().fuse() => { + if let Some(vote) = vote { + self.handle_vote( + (vote.commitment.payload, vote.commitment.block_number), + (vote.id, vote.signature), + ); + } else { + return; + } + }, + _ = gossip_engine.fuse() => { + error!(target: "beefy", "🥩 Gossip engine has terminated."); + return; + } + } + } + } +} + +/// Extract the MMR root hash from a digest in the given header, if it exists. +fn find_mmr_root_digest(header: &B::Header) -> Option +where + B: Block, + Id: Codec, +{ + header.digest().logs().iter().find_map(|log| { + match log.try_to::>(OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID)) { + Some(ConsensusLog::MmrRoot(root)) => Some(root), + _ => None, + } + }) +} + +/// Scan the `header` digest log for a BEEFY validator set change. Return either the new +/// validator set or `None` in case no validator set change has been signaled. 
+fn find_authorities_change(header: &B::Header) -> Option> +where + B: Block, +{ + let id = OpaqueDigestItemId::Consensus(&BEEFY_ENGINE_ID); + + let filter = |log: ConsensusLog| match log { + ConsensusLog::AuthoritiesChange(validator_set) => Some(validator_set), + _ => None, + }; + + header.digest().convert_first(|l| l.try_to(id).and_then(filter)) +} + +/// Calculate next block number to vote on +fn vote_target(best_grandpa: N, best_beefy: N, min_delta: u32) -> N +where + N: AtLeast32Bit + Copy + Debug, +{ + let diff = best_grandpa.saturating_sub(best_beefy); + let diff = diff.saturated_into::(); + let target = best_beefy + min_delta.max(diff.next_power_of_two()).into(); + + trace!( + target: "beefy", + "🥩 vote target - diff: {:?}, next_power_of_two: {:?}, target block: #{:?}", + diff, + diff.next_power_of_two(), + target, + ); + + target +} + +#[cfg(test)] +mod tests { + use super::vote_target; + + #[test] + fn vote_on_min_block_delta() { + let t = vote_target(1u32, 0, 4); + assert_eq!(4, t); + let t = vote_target(2u32, 0, 4); + assert_eq!(4, t); + let t = vote_target(3u32, 0, 4); + assert_eq!(4, t); + let t = vote_target(4u32, 0, 4); + assert_eq!(4, t); + + let t = vote_target(4u32, 4, 4); + assert_eq!(8, t); + + let t = vote_target(10u32, 10, 4); + assert_eq!(14, t); + let t = vote_target(11u32, 10, 4); + assert_eq!(14, t); + let t = vote_target(12u32, 10, 4); + assert_eq!(14, t); + let t = vote_target(13u32, 10, 4); + assert_eq!(14, t); + + let t = vote_target(10u32, 10, 8); + assert_eq!(18, t); + let t = vote_target(11u32, 10, 8); + assert_eq!(18, t); + let t = vote_target(12u32, 10, 8); + assert_eq!(18, t); + let t = vote_target(13u32, 10, 8); + assert_eq!(18, t); + } + + #[test] + fn vote_on_power_of_two() { + let t = vote_target(1008u32, 1000, 4); + assert_eq!(1008, t); + + let t = vote_target(1016u32, 1000, 4); + assert_eq!(1016, t); + + let t = vote_target(1032u32, 1000, 4); + assert_eq!(1032, t); + + let t = vote_target(1064u32, 1000, 4); + 
assert_eq!(1064, t); + + let t = vote_target(1128u32, 1000, 4); + assert_eq!(1128, t); + + let t = vote_target(1256u32, 1000, 4); + assert_eq!(1256, t); + + let t = vote_target(1512u32, 1000, 4); + assert_eq!(1512, t); + + let t = vote_target(1024u32, 0, 4); + assert_eq!(1024, t); + } + + #[test] + fn vote_on_target_block() { + let t = vote_target(1008u32, 1002, 4); + assert_eq!(1010, t); + let t = vote_target(1010u32, 1002, 4); + assert_eq!(1010, t); + + let t = vote_target(1016u32, 1006, 4); + assert_eq!(1022, t); + let t = vote_target(1022u32, 1006, 4); + assert_eq!(1022, t); + + let t = vote_target(1032u32, 1012, 4); + assert_eq!(1044, t); + let t = vote_target(1044u32, 1012, 4); + assert_eq!(1044, t); + + let t = vote_target(1064u32, 1014, 4); + assert_eq!(1078, t); + let t = vote_target(1078u32, 1014, 4); + assert_eq!(1078, t); + + let t = vote_target(1128u32, 1008, 4); + assert_eq!(1136, t); + let t = vote_target(1136u32, 1008, 4); + assert_eq!(1136, t); + } +} diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index 6fef8498134eb..9d2703fc2ed2a 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate block builder" readme = "README.md" diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 8af2996e968d8..78062e600c3f5 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = 
"Substrate chain configurations." readme = "README.md" diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index b210fa1320e04..73083e4a6e0ba 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Macros to derive chain spec extension traits implementation." @@ -17,7 +17,7 @@ proc-macro = true [dependencies] proc-macro-crate = "1.0.0" proc-macro2 = "1.0.29" -quote = "1.0.3" -syn = "1.0.58" +quote = "1.0.10" +syn = "1.0.80" [dev-dependencies] diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index fcdb053c47c16..c4db6158125b1 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -58,8 +58,9 @@ impl GenesisSource { match self { Self::File(path) => { - let file = - File::open(path).map_err(|e| format!("Error opening spec file: {}", e))?; + let file = File::open(path).map_err(|e| { + format!("Error opening spec file at `{}`: {}", path.display(), e) + })?; let genesis: GenesisContainer = json::from_reader(file) .map_err(|e| format!("Error parsing spec file: {}", e))?; Ok(genesis.genesis) @@ -163,6 +164,7 @@ struct ClientSpec { // Never used, left only for backward compatibility. consensus_engine: (), #[serde(skip_serializing)] + #[allow(unused)] genesis: serde::de::IgnoredAny, /// Mapping from `block_hash` to `wasm_code`. 
/// @@ -283,7 +285,8 @@ impl ChainSpec { /// Parse json file into a `ChainSpec` pub fn from_json_file(path: PathBuf) -> Result { - let file = File::open(&path).map_err(|e| format!("Error opening spec file: {}", e))?; + let file = File::open(&path) + .map_err(|e| format!("Error opening spec file `{}`: {}", path.display(), e))?; let client_spec = json::from_reader(file).map_err(|e| format!("Error parsing spec file: {}", e))?; Ok(ChainSpec { client_spec, genesis: GenesisSource::File(path) }) diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index e7a0330e76e0c..0cee37f25e797 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Substrate CLI interface." edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" @@ -14,15 +14,15 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.11" -regex = "1.4.2" +regex = "1.5.4" tokio = { version = "1.10", features = [ "signal", "rt-multi-thread" ] } futures = "0.3.9" fdlimit = "0.2.1" libp2p = "0.39.1" -parity-scale-codec = "2.0.0" +parity-scale-codec = "2.3.1" hex = "0.4.2" rand = "0.7.3" -tiny-bip39 = "0.8.0" +tiny-bip39 = "0.8.2" serde_json = "1.0.68" sc-keystore = { version = "4.0.0-dev", path = "../keystore" } sp-panic-handler = { version = "3.0.0", path = "../../primitives/panic-handler" } diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index 07a76319dca3f..de0d1132ce8be 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ -21,7 +21,7 @@ use crate::{ params::{BlockNumberOrHash, ImportParams, SharedParams}, CliConfiguration, }; -use sc_client_api::{BlockBackend, UsageProvider}; +use sc_client_api::{BlockBackend, HeaderBackend}; use sp_runtime::traits::{Block 
as BlockT, Header as HeaderT}; use std::{fmt::Debug, str::FromStr, sync::Arc}; use structopt::StructOpt; @@ -53,7 +53,7 @@ impl CheckBlockCmd { pub async fn run(&self, client: Arc, import_queue: IQ) -> error::Result<()> where B: BlockT + for<'de> serde::Deserialize<'de>, - C: BlockBackend + UsageProvider + Send + Sync + 'static, + C: BlockBackend + HeaderBackend + Send + Sync + 'static, IQ: sc_service::ImportQueue + 'static, B::Hash: FromStr, ::Err: Debug, diff --git a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs index 9b211b88d5563..19187f2859621 100644 --- a/client/cli/src/commands/import_blocks_cmd.rs +++ b/client/cli/src/commands/import_blocks_cmd.rs @@ -21,7 +21,7 @@ use crate::{ params::{ImportParams, SharedParams}, CliConfiguration, }; -use sc_client_api::UsageProvider; +use sc_client_api::HeaderBackend; use sc_service::chain_ops::import_blocks; use sp_runtime::traits::Block as BlockT; use std::{ @@ -68,7 +68,7 @@ impl ImportBlocksCmd { /// Run the import-blocks command pub async fn run(&self, client: Arc, import_queue: IQ) -> error::Result<()> where - C: UsageProvider + Send + Sync + 'static, + C: HeaderBackend + Send + Sync + 'static, B: BlockT + for<'de> serde::Deserialize<'de>, IQ: sc_service::ImportQueue + 'static, { diff --git a/client/cli/src/commands/insert_key.rs b/client/cli/src/commands/insert_key.rs index 05055dc53c1e2..081c319081607 100644 --- a/client/cli/src/commands/insert_key.rs +++ b/client/cli/src/commands/insert_key.rs @@ -18,7 +18,7 @@ //! 
Implementation of the `insert` subcommand use crate::{ - utils, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, SharedParams, SubstrateCli, + utils, with_crypto_scheme, CryptoScheme, Error, KeystoreParams, SharedParams, SubstrateCli, }; use sc_keystore::LocalKeystore; use sc_service::config::{BasePath, KeystoreConfig}; @@ -49,9 +49,14 @@ pub struct InsertKeyCmd { #[structopt(flatten)] pub keystore_params: KeystoreParams, - #[allow(missing_docs)] - #[structopt(flatten)] - pub crypto_scheme: CryptoSchemeFlag, + /// The cryptography scheme that should be used to generate the key out of the given URI. + #[structopt( + long, + value_name = "SCHEME", + possible_values = &CryptoScheme::variants(), + case_insensitive = true, + )] + pub scheme: CryptoScheme, } impl InsertKeyCmd { @@ -68,10 +73,7 @@ impl InsertKeyCmd { let (keystore, public) = match self.keystore_params.keystore_config(&config_dir)? { (_, KeystoreConfig::Path { path, password }) => { - let public = with_crypto_scheme!( - self.crypto_scheme.scheme, - to_vec(&suri, password.clone()) - )?; + let public = with_crypto_scheme!(self.scheme, to_vec(&suri, password.clone()))?; let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::open(path, password)?); (keystore, public) }, @@ -161,6 +163,7 @@ mod tests { "test", "--suri", &uri, + "--scheme=sr25519", ]); assert!(inspect.run(&Cli).is_ok()); diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index 98f2090c6f446..d6c0133a7c145 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -127,6 +127,10 @@ pub struct RunCmd { #[structopt(long = "ws-max-connections", value_name = "COUNT")] pub ws_max_connections: Option, + /// Set the the maximum WebSocket output buffer size in MiB. Default is 16. + #[structopt(long = "ws-max-out-buffer-capacity")] + pub ws_max_out_buffer_capacity: Option, + /// Specify browser Origins allowed to access the HTTP & WS RPC servers. 
/// /// A comma-separated list of origins (protocol://domain or special `null` @@ -241,6 +245,8 @@ pub struct RunCmd { /// /// Note: the directory is random per process execution. This directory is used as base path /// which includes: database, node key and keystore. + /// + /// When `--dev` is given and no explicit `--base-path`, this option is implied. #[structopt(long, conflicts_with = "base-path")] pub tmp: bool, } @@ -432,6 +438,10 @@ impl CliConfiguration for RunCmd { Ok(self.rpc_max_payload) } + fn ws_max_out_buffer_capacity(&self) -> Result> { + Ok(self.ws_max_out_buffer_capacity) + } + fn transaction_pool(&self) -> Result { Ok(self.pool_config.transaction_pool()) } @@ -444,7 +454,12 @@ impl CliConfiguration for RunCmd { Ok(if self.tmp { Some(BasePath::new_temp_dir()?) } else { - self.shared_params().base_path() + match self.shared_params().base_path() { + Some(r) => Some(r), + // If `dev` is enabled, we use the temp base path. + None if self.shared_params().is_dev() => Some(BasePath::new_temp_dir()?), + None => None, + } }) } } diff --git a/client/cli/src/commands/utils.rs b/client/cli/src/commands/utils.rs index 864d7e920f81a..39e5149404b29 100644 --- a/client/cli/src/commands/utils.rs +++ b/client/cli/src/commands/utils.rs @@ -23,7 +23,10 @@ use crate::{ }; use serde_json::json; use sp_core::{ - crypto::{ExposeSecret, SecretString, Ss58AddressFormat, Ss58Codec, Zeroize}, + crypto::{ + unwrap_or_default_ss58_version, ExposeSecret, SecretString, Ss58AddressFormat, Ss58Codec, + Zeroize, + }, hexdisplay::HexDisplay, Pair, }; @@ -72,7 +75,7 @@ pub fn print_from_uri( let password = password.as_ref().map(|s| s.expose_secret().as_str()); if let Ok((pair, seed)) = Pair::from_phrase(uri, password.clone()) { let public_key = pair.public(); - let network_override = network_override.unwrap_or_default(); + let network_override = unwrap_or_default_ss58_version(network_override); match output { OutputType::Json => { @@ -108,7 +111,7 @@ pub fn print_from_uri( } } else 
if let Ok((pair, seed)) = Pair::from_string_with_seed(uri, password.clone()) { let public_key = pair.public(); - let network_override = network_override.unwrap_or_default(); + let network_override = unwrap_or_default_ss58_version(network_override); match output { OutputType::Json => { @@ -198,7 +201,7 @@ where let public_key = Pair::Public::try_from(&public) .map_err(|_| "Failed to construct public key from given hex")?; - let network_override = network_override.unwrap_or_default(); + let network_override = unwrap_or_default_ss58_version(network_override); match output { OutputType::Json => { diff --git a/client/cli/src/commands/vanity.rs b/client/cli/src/commands/vanity.rs index daeb81e86a1a1..d2953c8396079 100644 --- a/client/cli/src/commands/vanity.rs +++ b/client/cli/src/commands/vanity.rs @@ -22,7 +22,7 @@ use crate::{ error, utils, with_crypto_scheme, CryptoSchemeFlag, NetworkSchemeFlag, OutputTypeFlag, }; use rand::{rngs::OsRng, RngCore}; -use sp_core::crypto::{Ss58AddressFormat, Ss58Codec}; +use sp_core::crypto::{unwrap_or_default_ss58_version, Ss58AddressFormat, Ss58Codec}; use sp_runtime::traits::IdentifyAccount; use structopt::StructOpt; use utils::print_from_uri; @@ -53,7 +53,10 @@ impl VanityCmd { pub fn run(&self) -> error::Result<()> { let formated_seed = with_crypto_scheme!( self.crypto_scheme.scheme, - generate_key(&self.pattern, self.network_scheme.network.clone().unwrap_or_default()), + generate_key( + &self.pattern, + unwrap_or_default_ss58_version(self.network_scheme.network) + ), )?; with_crypto_scheme!( @@ -159,7 +162,10 @@ fn assert_non_empty_string(pattern: &str) -> Result { #[cfg(test)] mod tests { use super::*; - use sp_core::{crypto::Ss58Codec, sr25519, Pair}; + use sp_core::{ + crypto::{default_ss58_version, Ss58AddressFormatRegistry, Ss58Codec}, + sr25519, Pair, + }; use structopt::StructOpt; #[cfg(feature = "bench")] use test::Bencher; @@ -172,7 +178,7 @@ mod tests { #[test] fn test_generation_with_single_char() { - let seed = 
generate_key::("ab", Default::default()).unwrap(); + let seed = generate_key::("ab", default_ss58_version()).unwrap(); assert!(sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) .unwrap() .public() @@ -182,11 +188,13 @@ mod tests { #[test] fn generate_key_respects_network_override() { - let seed = generate_key::("ab", Ss58AddressFormat::PolkadotAccount).unwrap(); + let seed = + generate_key::("ab", Ss58AddressFormatRegistry::PolkadotAccount.into()) + .unwrap(); assert!(sr25519::Pair::from_seed_slice(&hex::decode(&seed[2..]).unwrap()) .unwrap() .public() - .to_ss58check_with_version(Ss58AddressFormat::PolkadotAccount) + .to_ss58check_with_version(Ss58AddressFormatRegistry::PolkadotAccount.into()) .contains("ab")); } diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 59fc6bd438a1c..86eeed5b40237 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -229,7 +229,7 @@ pub trait CliConfiguration: Sized { let paritydb_path = base_path.join("paritydb").join(role_dir); Ok(match database { Database::RocksDb => DatabaseSource::RocksDb { path: rocksdb_path, cache_size }, - Database::ParityDb => DatabaseSource::ParityDb { path: rocksdb_path }, + Database::ParityDb => DatabaseSource::ParityDb { path: paritydb_path }, Database::Auto => DatabaseSource::Auto { paritydb_path, rocksdb_path, cache_size }, }) } @@ -360,6 +360,11 @@ pub trait CliConfiguration: Sized { Ok(None) } + /// Get maximum WS output buffer capacity. + fn ws_max_out_buffer_capacity(&self) -> Result> { + Ok(None) + } + /// Get the prometheus configuration (`None` if disabled) /// /// By default this is `None`. 
@@ -513,6 +518,7 @@ pub trait CliConfiguration: Sized { rpc_ws_max_connections: self.rpc_ws_max_connections()?, rpc_cors: self.rpc_cors(is_dev)?, rpc_max_payload: self.rpc_max_payload()?, + ws_max_out_buffer_capacity: self.ws_max_out_buffer_capacity()?, prometheus_config: self.prometheus_config(DCV::prometheus_listen_port())?, telemetry_endpoints, default_heap_pages: self.default_heap_pages()?, @@ -522,7 +528,6 @@ pub trait CliConfiguration: Sized { dev_key_seed: self.dev_key_seed(is_dev)?, tracing_targets: self.tracing_targets()?, tracing_receiver: self.tracing_receiver()?, - disable_log_reloading: self.is_log_filter_reloading_disabled()?, chain_spec, max_runtime_instances, announce_block: self.announce_block()?, @@ -542,9 +547,9 @@ pub trait CliConfiguration: Sized { Ok(self.shared_params().log_filters().join(",")) } - /// Is log reloading disabled (enabled by default) - fn is_log_filter_reloading_disabled(&self) -> Result { - Ok(self.shared_params().is_log_filter_reloading_disabled()) + /// Is log reloading enabled? + fn enable_log_reloading(&self) -> Result { + Ok(self.shared_params().enable_log_reloading()) } /// Should the log color output be disabled? @@ -563,7 +568,7 @@ pub trait CliConfiguration: Sized { sp_panic_handler::set(&C::support_url(), &C::impl_version()); let mut logger = LoggerBuilder::new(self.log_filters()?); - logger.with_log_reloading(!self.is_log_filter_reloading_disabled()?); + logger.with_log_reloading(self.enable_log_reloading()?); if let Some(tracing_targets) = self.tracing_targets()? { let tracing_receiver = self.tracing_receiver()?; diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index 41472387d2639..58aabb3148dd2 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -32,6 +32,9 @@ pub struct SharedParams { pub chain: Option, /// Specify the development chain. 
+ /// + /// This flag sets `--chain=dev`, `--force-authoring`, `--rpc-cors=all`, + /// `--alice`, and `--tmp` flags, unless explicitly overridden. #[structopt(long, conflicts_with_all = &["chain"])] pub dev: bool, @@ -50,13 +53,15 @@ pub struct SharedParams { #[structopt(long)] pub disable_log_color: bool, - /// Disable feature to dynamically update and reload the log filter. + /// Enable feature to dynamically update and reload the log filter. + /// + /// Be aware that enabling this feature can lead to a performance decrease up to factor six or + /// more. Depending on the global logging level the performance decrease changes. /// - /// By default this feature is enabled, however it leads to a small performance decrease. /// The `system_addLogFilter` and `system_resetLogFilter` RPCs will have no effect with this - /// option set. - #[structopt(long = "disable-log-reloading")] - pub disable_log_reloading: bool, + /// option not being set. + #[structopt(long)] + pub enable_log_reloading: bool, /// Sets a custom profiling filter. Syntax is the same as for logging: = #[structopt(long = "tracing-targets", value_name = "TARGETS")] @@ -107,9 +112,9 @@ impl SharedParams { self.disable_log_color } - /// Is log reloading disabled - pub fn is_log_filter_reloading_disabled(&self) -> bool { - self.disable_log_reloading + /// Is log reloading enabled + pub fn enable_log_reloading(&self) -> bool { + self.enable_log_reloading } /// Receiver to process tracing messages. 
diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index 75595779427bb..1c767319b1229 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Aura consensus algorithm for substrate" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 65dfc57133206..66ad6287f40df 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "BABE consensus algorithm for substrate" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-consensus-babe" readme = "README.md" @@ -50,7 +50,7 @@ schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } rand = "0.7.2" merlin = "2.0" derive_more = "0.99.2" -retain_mut = "0.1.3" +retain_mut = "0.1.4" async-trait = "0.1.50" [dev-dependencies] diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 8d5625705a48c..7ef9b1c1de3c4 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "RPC extensions for the BABE consensus algorithm" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 
b18220c3e360a..d5b8a218a5a3f 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -23,14 +23,17 @@ use log::info; use crate::{migration::EpochV0, Epoch}; use sc_client_api::backend::AuxStore; -use sc_consensus_epochs::{migration::EpochChangesForV0, EpochChangesFor, SharedEpochChanges}; +use sc_consensus_epochs::{ + migration::{EpochChangesV0For, EpochChangesV1For}, + EpochChangesFor, SharedEpochChanges, +}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_consensus_babe::{BabeBlockWeight, BabeGenesisConfiguration}; use sp_runtime::traits::Block as BlockT; const BABE_EPOCH_CHANGES_VERSION: &[u8] = b"babe_epoch_changes_version"; const BABE_EPOCH_CHANGES_KEY: &[u8] = b"babe_epoch_changes"; -const BABE_EPOCH_CHANGES_CURRENT_VERSION: u32 = 2; +const BABE_EPOCH_CHANGES_CURRENT_VERSION: u32 = 3; /// The aux storage key used to store the block weight of the given block hash. pub fn block_weight_key(block_hash: H) -> Vec { @@ -60,11 +63,16 @@ pub fn load_epoch_changes( let maybe_epoch_changes = match version { None => - load_decode::<_, EpochChangesForV0>(backend, BABE_EPOCH_CHANGES_KEY)? + load_decode::<_, EpochChangesV0For>(backend, BABE_EPOCH_CHANGES_KEY)? .map(|v0| v0.migrate().map(|_, _, epoch| epoch.migrate(config))), Some(1) => - load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES_KEY)? - .map(|v1| v1.map(|_, _, epoch| epoch.migrate(config))), + load_decode::<_, EpochChangesV1For>(backend, BABE_EPOCH_CHANGES_KEY)? + .map(|v1| v1.migrate().map(|_, _, epoch| epoch.migrate(config))), + Some(2) => { + // v2 still uses `EpochChanges` v1 format but with a different `Epoch` type. + load_decode::<_, EpochChangesV1For>(backend, BABE_EPOCH_CHANGES_KEY)? 
+ .map(|v2| v2.migrate()) + }, Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES_KEY)?, Some(other) => @@ -164,7 +172,7 @@ mod test { .insert_aux( &[( BABE_EPOCH_CHANGES_KEY, - &EpochChangesForV0::::from_raw(v0_tree).encode()[..], + &EpochChangesV0For::::from_raw(v0_tree).encode()[..], )], &[], ) @@ -202,6 +210,6 @@ mod test { client.insert_aux(values, &[]).unwrap(); }); - assert_eq!(load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), Some(2)); + assert_eq!(load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), Some(3)); } } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index a0b6bde025b3f..1fde788041155 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -1578,8 +1578,12 @@ where *block.header.parent_hash(), next_epoch, ) - .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; - + .map_err(|e| { + ConsensusError::ClientImport(format!( + "Error importing epoch changes: {:?}", + e + )) + })?; Ok(()) }; @@ -1667,6 +1671,9 @@ where Client: HeaderBackend + HeaderMetadata, { let info = client.info(); + if info.block_gap.is_none() { + epoch_changes.clear_gap(); + } let finalized_slot = { let finalized_header = client diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml index 6829bd2c6d8b5..30840a974f9aa 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Collection of common consensus specific imlementations for Substrate (client)" readme = "README.md" diff --git a/client/consensus/common/src/block_import.rs 
b/client/consensus/common/src/block_import.rs index 6d411dd9afbf1..d828e54bc7e3e 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -74,7 +74,7 @@ impl ImportResult { &self, hash: &B::Hash, number: NumberFor, - justification_sync_link: &mut dyn JustificationSyncLink, + justification_sync_link: &dyn JustificationSyncLink, ) where B: BlockT, { diff --git a/client/consensus/common/src/longest_chain.rs b/client/consensus/common/src/longest_chain.rs index b1f7f94f9eb28..7ec91a5ad87e9 100644 --- a/client/consensus/common/src/longest_chain.rs +++ b/client/consensus/common/src/longest_chain.rs @@ -91,11 +91,12 @@ where &self, target_hash: Block::Hash, maybe_max_number: Option>, - ) -> Result, ConsensusError> { + ) -> Result { let import_lock = self.backend.get_import_lock(); self.backend .blockchain() .best_containing(target_hash, maybe_max_number, import_lock) + .map(|maybe_hash| maybe_hash.unwrap_or(target_hash)) .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) } } diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index 78e5cc31ea07e..90a708d07e2a2 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Generic epochs-based utilities for consensus" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index f3cfc55bae69b..c1d0bd1d04050 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -78,11 +78,11 @@ where /// /// Once an epoch is created, it must have a known `start_slot` and `end_slot`, which cannot be /// changed. 
Consensus engine may modify any other data in the epoch, if needed. -pub trait Epoch { +pub trait Epoch: std::fmt::Debug { /// Descriptor for the next epoch. type NextEpochDescriptor; /// Type of the slot number. - type Slot: Ord + Copy; + type Slot: Ord + Copy + std::fmt::Debug; /// The starting slot of the epoch. fn start_slot(&self) -> Self::Slot; @@ -228,14 +228,21 @@ impl ViableEpochDescriptor { } /// Persisted epoch stored in EpochChanges. -#[derive(Clone, Encode, Decode)] -pub enum PersistedEpoch { +#[derive(Clone, Encode, Decode, Debug)] +pub enum PersistedEpoch { /// Genesis persisted epoch data. epoch_0, epoch_1. Genesis(E, E), /// Regular persisted epoch data. epoch_n. Regular(E), } +impl PersistedEpoch { + /// Returns if this is a genesis epoch. + pub fn is_genesis(&self) -> bool { + matches!(self, Self::Genesis(_, _)) + } +} + impl<'a, E: Epoch> From<&'a PersistedEpoch> for PersistedEpochHeader { fn from(epoch: &'a PersistedEpoch) -> Self { match epoch { @@ -246,8 +253,23 @@ impl<'a, E: Epoch> From<&'a PersistedEpoch> for PersistedEpochHeader { } } +impl PersistedEpoch { + /// Map the epoch to a different type using a conversion function. + pub fn map(self, h: &Hash, n: &Number, f: &mut F) -> PersistedEpoch + where + B: Epoch, + F: FnMut(&Hash, &Number, E) -> B, + { + match self { + PersistedEpoch::Genesis(epoch_0, epoch_1) => + PersistedEpoch::Genesis(f(h, n, epoch_0), f(h, n, epoch_1)), + PersistedEpoch::Regular(epoch_n) => PersistedEpoch::Regular(f(h, n, epoch_n)), + } + } +} + /// Persisted epoch header stored in ForkTree. -#[derive(Encode, Decode, PartialEq, Eq)] +#[derive(Encode, Decode, PartialEq, Eq, Debug)] pub enum PersistedEpochHeader { /// Genesis persisted epoch header. epoch_0, epoch_1. Genesis(EpochHeader, EpochHeader), @@ -264,6 +286,25 @@ impl Clone for PersistedEpochHeader { } } +impl PersistedEpochHeader { + /// Map the epoch header to a different type. 
+ pub fn map(self) -> PersistedEpochHeader + where + B: Epoch, + { + match self { + PersistedEpochHeader::Genesis(epoch_0, epoch_1) => PersistedEpochHeader::Genesis( + EpochHeader { start_slot: epoch_0.start_slot, end_slot: epoch_0.end_slot }, + EpochHeader { start_slot: epoch_1.start_slot, end_slot: epoch_1.end_slot }, + ), + PersistedEpochHeader::Regular(epoch_n) => PersistedEpochHeader::Regular(EpochHeader { + start_slot: epoch_n.start_slot, + end_slot: epoch_n.end_slot, + }), + } + } +} + /// A fresh, incremented epoch to import into the underlying fork-tree. /// /// Create this with `ViableEpoch::increment`. @@ -279,6 +320,106 @@ impl AsRef for IncrementedEpoch { } } +/// A pair of epochs for the gap block download validation. +/// Block gap is created after the warp sync is complete. Blocks +/// are imported both at the tip of the chain and at the start of the gap. +/// This holds a pair of epochs that are required to validate headers +/// at the start of the gap. Since gap download does not allow forks we don't +/// need to keep a tree of epochs. +#[derive(Clone, Encode, Decode, Debug)] +pub struct GapEpochs { + current: (Hash, Number, PersistedEpoch), + next: Option<(Hash, Number, E)>, +} + +impl GapEpochs +where + Hash: Copy + PartialEq + std::fmt::Debug, + Number: Copy + PartialEq + std::fmt::Debug, + E: Epoch, +{ + /// Check if given slot matches one of the gap epochs. + /// Returns epoch identifier if it does. 
+ fn matches( + &self, + slot: E::Slot, + ) -> Option<(Hash, Number, EpochHeader, EpochIdentifierPosition)> { + match &self.current { + (_, _, PersistedEpoch::Genesis(epoch_0, _)) + if slot >= epoch_0.start_slot() && slot < epoch_0.end_slot() => + return Some(( + self.current.0, + self.current.1, + epoch_0.into(), + EpochIdentifierPosition::Genesis0, + )), + (_, _, PersistedEpoch::Genesis(_, epoch_1)) + if slot >= epoch_1.start_slot() && slot < epoch_1.end_slot() => + return Some(( + self.current.0, + self.current.1, + epoch_1.into(), + EpochIdentifierPosition::Genesis1, + )), + (_, _, PersistedEpoch::Regular(epoch_n)) + if slot >= epoch_n.start_slot() && slot < epoch_n.end_slot() => + return Some(( + self.current.0, + self.current.1, + epoch_n.into(), + EpochIdentifierPosition::Regular, + )), + _ => {}, + }; + match &self.next { + Some((h, n, epoch_n)) if slot >= epoch_n.start_slot() && slot < epoch_n.end_slot() => + Some((*h, *n, epoch_n.into(), EpochIdentifierPosition::Regular)), + _ => None, + } + } + + /// Returns epoch data if it matches given identifier. + pub fn epoch(&self, id: &EpochIdentifier) -> Option<&E> { + match (&self.current, &self.next) { + ((h, n, e), _) if h == &id.hash && n == &id.number => match e { + PersistedEpoch::Genesis(ref epoch_0, _) + if id.position == EpochIdentifierPosition::Genesis0 => + Some(epoch_0), + PersistedEpoch::Genesis(_, ref epoch_1) + if id.position == EpochIdentifierPosition::Genesis1 => + Some(epoch_1), + PersistedEpoch::Regular(ref epoch_n) + if id.position == EpochIdentifierPosition::Regular => + Some(epoch_n), + _ => None, + }, + (_, Some((h, n, e))) + if h == &id.hash && + n == &id.number && id.position == EpochIdentifierPosition::Regular => + Some(e), + _ => None, + } + } + + /// Import a new gap epoch, potentially replacing an old epoch. 
+ fn import(&mut self, slot: E::Slot, hash: Hash, number: Number, epoch: E) -> Result<(), E> { + match (&mut self.current, &mut self.next) { + ((_, _, PersistedEpoch::Genesis(_, epoch_1)), _) if slot == epoch_1.end_slot() => { + self.next = Some((hash, number, epoch)); + Ok(()) + }, + (_, Some((_, _, epoch_n))) if slot == epoch_n.end_slot() => { + let (cur_h, cur_n, cur_epoch) = + self.next.take().expect("Already matched as `Some`"); + self.current = (cur_h, cur_n, PersistedEpoch::Regular(cur_epoch)); + self.next = Some((hash, number, epoch)); + Ok(()) + }, + _ => Err(epoch), + } + } +} + /// Tree of all epoch changes across all *seen* forks. Data stored in tree is /// the hash and block number of the block signaling the epoch change, and the /// epoch that was signalled at that block. @@ -294,10 +435,14 @@ impl AsRef for IncrementedEpoch { /// same DAG entry, pinned to a specific block #1. /// /// Further epochs (epoch_2, ..., epoch_n) each get their own entry. -#[derive(Clone, Encode, Decode)] +/// +/// Also maintains a pair of epochs for the start of the gap, +/// as long as there's an active gap download after a warp sync. +#[derive(Clone, Encode, Decode, Debug)] pub struct EpochChanges { inner: ForkTree>, epochs: BTreeMap<(Hash, Number), PersistedEpoch>, + gap: Option>, } // create a fake header hash which hasn't been included in the chain. @@ -315,14 +460,14 @@ where Number: Ord, { fn default() -> Self { - EpochChanges { inner: ForkTree::new(), epochs: BTreeMap::new() } + EpochChanges { inner: ForkTree::new(), epochs: BTreeMap::new(), gap: None } } } impl EpochChanges where - Hash: PartialEq + Ord + AsRef<[u8]> + AsMut<[u8]> + Copy, - Number: Ord + One + Zero + Add + Sub + Copy, + Hash: PartialEq + Ord + AsRef<[u8]> + AsMut<[u8]> + Copy + std::fmt::Debug, + Number: Ord + One + Zero + Add + Sub + Copy + std::fmt::Debug, { /// Create a new epoch change. pub fn new() -> Self { @@ -335,6 +480,11 @@ where self.inner.rebalance() } + /// Clear gap epochs if any. 
+ pub fn clear_gap(&mut self) { + self.gap = None; + } + /// Map the epoch changes from one storing data to a different one. pub fn map(self, mut f: F) -> EpochChanges where @@ -342,31 +492,15 @@ where F: FnMut(&Hash, &Number, E) -> B, { EpochChanges { - inner: self.inner.map(&mut |_, _, header| match header { - PersistedEpochHeader::Genesis(epoch_0, epoch_1) => PersistedEpochHeader::Genesis( - EpochHeader { start_slot: epoch_0.start_slot, end_slot: epoch_0.end_slot }, - EpochHeader { start_slot: epoch_1.start_slot, end_slot: epoch_1.end_slot }, - ), - PersistedEpochHeader::Regular(epoch_n) => - PersistedEpochHeader::Regular(EpochHeader { - start_slot: epoch_n.start_slot, - end_slot: epoch_n.end_slot, - }), + inner: self.inner.map(&mut |_, _, header: PersistedEpochHeader| header.map()), + gap: self.gap.map(|GapEpochs { current: (h, n, header), next }| GapEpochs { + current: (h, n, header.map(&h, &n, &mut f)), + next: next.map(|(h, n, e)| (h, n, f(&h, &n, e))), }), epochs: self .epochs .into_iter() - .map(|((hash, number), epoch)| { - let bepoch = match epoch { - PersistedEpoch::Genesis(epoch_0, epoch_1) => PersistedEpoch::Genesis( - f(&hash, &number, epoch_0), - f(&hash, &number, epoch_1), - ), - PersistedEpoch::Regular(epoch_n) => - PersistedEpoch::Regular(f(&hash, &number, epoch_n)), - }; - ((hash, number), bepoch) - }) + .map(|((hash, number), epoch)| ((hash, number), epoch.map(&hash, &number, &mut f))) .collect(), } } @@ -402,6 +536,9 @@ where /// Get a reference to an epoch with given identifier. 
pub fn epoch(&self, id: &EpochIdentifier) -> Option<&E> { + if let Some(e) = &self.gap.as_ref().and_then(|gap| gap.epoch(id)) { + return Some(e) + } self.epochs.get(&(id.hash, id.number)).and_then(|v| match v { PersistedEpoch::Genesis(ref epoch_0, _) if id.position == EpochIdentifierPosition::Genesis0 => @@ -537,6 +674,15 @@ where return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot))) } + if let Some(gap) = &self.gap { + if let Some((hash, number, hdr, position)) = gap.matches(slot) { + return Ok(Some(ViableEpochDescriptor::Signaled( + EpochIdentifier { position, hash, number }, + hdr, + ))) + } + } + // We want to find the deepest node in the tree which is an ancestor // of our block and where the start slot of the epoch was before the // slot of our block. The genesis special-case doesn't need to look @@ -598,13 +744,30 @@ where ) -> Result<(), fork_tree::Error> { let is_descendent_of = descendent_of_builder.build_is_descendent_of(Some((hash, parent_hash))); - let header = PersistedEpochHeader::::from(&epoch.0); + let slot = epoch.as_ref().start_slot(); + let IncrementedEpoch(mut epoch) = epoch; + let header = PersistedEpochHeader::::from(&epoch); + + if let Some(gap) = &mut self.gap { + if let PersistedEpoch::Regular(e) = epoch { + epoch = match gap.import(slot, hash.clone(), number.clone(), e) { + Ok(()) => return Ok(()), + Err(e) => PersistedEpoch::Regular(e), + } + } + } else if epoch.is_genesis() && !self.epochs.values().all(|e| e.is_genesis()) { + // There's a genesis epoch imported when we already have an active epoch. + // This happens after the warp sync as the ancient blocks download start. + // We need to start tracking gap epochs here. 
+ self.gap = Some(GapEpochs { current: (hash, number, epoch), next: None }); + return Ok(()) + } let res = self.inner.import(hash, number, header, &is_descendent_of); match res { Ok(_) | Err(fork_tree::Error::Duplicate) => { - self.epochs.insert((hash, number), epoch.0); + self.epochs.insert((hash, number), epoch); Ok(()) }, Err(e) => Err(e), @@ -916,4 +1079,182 @@ mod tests { assert!(epoch_for_x_child_before_genesis.is_none()); } } + + /// Test that ensures that the gap is not enabled when we import multiple genesis blocks. + #[test] + fn gap_is_not_enabled_when_multiple_genesis_epochs_are_imported() { + // X + // / + // 0 - A + // + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, *block) { + (b"0", _) => Ok(true), + _ => Ok(false), + } + }; + + let duration = 100; + + let make_genesis = |slot| Epoch { start_slot: slot, duration }; + + let mut epoch_changes = EpochChanges::new(); + let next_descriptor = (); + + // insert genesis epoch for A + { + let genesis_epoch_a_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 100) + .unwrap() + .unwrap(); + + let incremented_epoch = epoch_changes + .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); + + epoch_changes + .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) + .unwrap(); + } + + // insert genesis epoch for X + { + let genesis_epoch_x_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 1000) + .unwrap() + .unwrap(); + + let incremented_epoch = epoch_changes + .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); + + epoch_changes + .import(&is_descendent_of, *b"X", 1, *b"0", incremented_epoch) + .unwrap(); + } + + // Clearing the gap should be a no-op. + epoch_changes.clear_gap(); + + // Check that both epochs are available. 
+ epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"A", 1, 101, &make_genesis) + .unwrap() + .unwrap(); + + epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"X", 1, 1001, &make_genesis) + .unwrap() + .unwrap(); + } + + #[test] + fn gap_epochs_advance() { + // 0 - 1 - 2 - 3 - .... 42 - 43 + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, *block) { + (b"0", _) => Ok(true), + (b"1", b) => Ok(b == *b"0"), + (b"2", b) => Ok(b == *b"1"), + (b"3", b) => Ok(b == *b"2"), + _ => Ok(false), + } + }; + + let duration = 100; + + let make_genesis = |slot| Epoch { start_slot: slot, duration }; + + let mut epoch_changes = EpochChanges::new(); + let next_descriptor = (); + + let epoch42 = Epoch { start_slot: 42, duration: 100 }; + let epoch43 = Epoch { start_slot: 43, duration: 100 }; + epoch_changes.reset(*b"0", *b"1", 4200, epoch42, epoch43); + assert!(epoch_changes.gap.is_none()); + + // Import a new genesis epoch, this should crate the gap. + let genesis_epoch_a_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 100) + .unwrap() + .unwrap(); + + let incremented_epoch = epoch_changes + .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); + + epoch_changes + .import(&is_descendent_of, *b"1", 1, *b"0", incremented_epoch) + .unwrap(); + assert!(epoch_changes.gap.is_some()); + + let genesis_epoch = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 100) + .unwrap() + .unwrap(); + + assert_eq!(genesis_epoch, ViableEpochDescriptor::UnimportedGenesis(100)); + + // Import more epochs and check that gap advances. 
+ let import_epoch_1 = + epoch_changes.viable_epoch(&genesis_epoch, &make_genesis).unwrap().increment(()); + + let epoch_1 = import_epoch_1.as_ref().clone(); + epoch_changes + .import(&is_descendent_of, *b"1", 1, *b"0", import_epoch_1) + .unwrap(); + let genesis_epoch_data = epoch_changes.epoch_data(&genesis_epoch, &make_genesis).unwrap(); + let end_slot = genesis_epoch_data.end_slot(); + let x = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"1", 1, end_slot, &make_genesis) + .unwrap() + .unwrap(); + + assert_eq!(x, epoch_1); + assert_eq!(epoch_changes.gap.as_ref().unwrap().current.0, *b"1"); + assert!(epoch_changes.gap.as_ref().unwrap().next.is_none()); + + let epoch_1_desriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"1", 1, end_slot) + .unwrap() + .unwrap(); + let epoch_1 = epoch_changes.epoch_data(&epoch_1_desriptor, &make_genesis).unwrap(); + let import_epoch_2 = epoch_changes + .viable_epoch(&epoch_1_desriptor, &make_genesis) + .unwrap() + .increment(()); + let epoch_2 = import_epoch_2.as_ref().clone(); + epoch_changes + .import(&is_descendent_of, *b"2", 2, *b"1", import_epoch_2) + .unwrap(); + + let end_slot = epoch_1.end_slot(); + let x = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"2", 2, end_slot, &make_genesis) + .unwrap() + .unwrap(); + assert_eq!(epoch_changes.gap.as_ref().unwrap().current.0, *b"1"); + assert_eq!(epoch_changes.gap.as_ref().unwrap().next.as_ref().unwrap().0, *b"2"); + assert_eq!(x, epoch_2); + + let epoch_2_desriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"2", 2, end_slot) + .unwrap() + .unwrap(); + let import_epoch_3 = epoch_changes + .viable_epoch(&epoch_2_desriptor, &make_genesis) + .unwrap() + .increment(()); + epoch_changes + .import(&is_descendent_of, *b"3", 3, *b"2", import_epoch_3) + .unwrap(); + + assert_eq!(epoch_changes.gap.as_ref().unwrap().current.0, *b"2"); + + epoch_changes.clear_gap(); + assert!(epoch_changes.gap.is_none()); + } } 
diff --git a/client/consensus/epochs/src/migration.rs b/client/consensus/epochs/src/migration.rs index 49e08240df8c3..e4b685c6ffb18 100644 --- a/client/consensus/epochs/src/migration.rs +++ b/client/consensus/epochs/src/migration.rs @@ -30,9 +30,19 @@ pub struct EpochChangesV0 { inner: ForkTree>, } -/// Type alias for legacy definition of epoch changes. -pub type EpochChangesForV0 = +/// Legacy definition of epoch changes. +#[derive(Clone, Encode, Decode)] +pub struct EpochChangesV1 { + inner: ForkTree>, + epochs: BTreeMap<(Hash, Number), PersistedEpoch>, +} + +/// Type alias for v0 definition of epoch changes. +pub type EpochChangesV0For = EpochChangesV0<::Hash, NumberFor, Epoch>; +/// Type alias for v1 and v2 definition of epoch changes. +pub type EpochChangesV1For = + EpochChangesV1<::Hash, NumberFor, Epoch>; impl EpochChangesV0 where @@ -54,6 +64,17 @@ where header }); - EpochChanges { inner, epochs } + EpochChanges { inner, epochs, gap: None } + } +} + +impl EpochChangesV1 +where + Hash: PartialEq + Ord + Copy, + Number: Ord + Copy, +{ + /// Migrate the type into current epoch changes definition. 
+ pub fn migrate(self) -> EpochChanges { + EpochChanges { inner: self.inner, epochs: self.epochs, gap: None } } } diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index d9ae8521c12f6..9b29bb3dc71c7 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Manual sealing engine for Substrate" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index c71e11aef275e..c570c1ccbad27 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "PoW consensus algorithm for substrate" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 1f5781434ef71..7b1012888e869 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -41,13 +41,12 @@ mod worker; -pub use crate::worker::{MiningBuild, MiningMetadata, MiningWorker}; +pub use crate::worker::{MiningBuild, MiningHandle, MiningMetadata}; use crate::worker::UntilImportedOrTimeout; use codec::{Decode, Encode}; use futures::{Future, StreamExt}; use log::*; -use parking_lot::Mutex; use prometheus_endpoint::Registry; use sc_client_api::{self, backend::AuxStore, BlockOf, BlockchainEvents}; use sc_consensus::{ @@ -525,7 +524,7 @@ pub fn start_mining_worker( build_time: Duration, can_author_with: CAW, ) -> ( - Arc>::Proof>>>, + MiningHandle>::Proof>, impl Future, ) where @@ 
-543,12 +542,7 @@ where CAW: CanAuthorWith + Clone + Send + 'static, { let mut timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); - let worker = Arc::new(Mutex::new(MiningWorker { - build: None, - algorithm: algorithm.clone(), - block_import, - justification_sync_link, - })); + let worker = MiningHandle::new(algorithm.clone(), block_import, justification_sync_link); let worker_ret = worker.clone(); let task = async move { @@ -559,7 +553,7 @@ where if sync_oracle.is_major_syncing() { debug!(target: "pow", "Skipping proposal due to sync."); - worker.lock().on_major_syncing(); + worker.on_major_syncing(); continue } @@ -587,7 +581,7 @@ where continue } - if worker.lock().best_hash() == Some(best_hash) { + if worker.best_hash() == Some(best_hash) { continue } @@ -682,7 +676,7 @@ where proposal, }; - worker.lock().on_build(build); + worker.on_build(build); } }; diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index c0ca16ccad3aa..3faa18ece3188 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -22,6 +22,7 @@ use futures::{ }; use futures_timer::Delay; use log::*; +use parking_lot::Mutex; use sc_client_api::ImportNotifications; use sc_consensus::{BlockImportParams, BoxBlockImport, StateAction, StorageChanges}; use sp_consensus::{BlockOrigin, Proposal}; @@ -30,7 +31,16 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT}, DigestItem, }; -use std::{borrow::Cow, collections::HashMap, pin::Pin, time::Duration}; +use std::{ + borrow::Cow, + collections::HashMap, + pin::Pin, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + time::Duration, +}; use crate::{PowAlgorithm, PowIntermediate, Seal, INTERMEDIATE_KEY, POW_ENGINE_ID}; @@ -60,21 +70,26 @@ pub struct MiningBuild< pub proposal: Proposal, Proof>, } +/// Version of the mining worker. 
+#[derive(Eq, PartialEq, Clone, Copy)] +pub struct Version(usize); + /// Mining worker that exposes structs to query the current mining build and submit mined blocks. -pub struct MiningWorker< +pub struct MiningHandle< Block: BlockT, Algorithm: PowAlgorithm, C: sp_api::ProvideRuntimeApi, L: sc_consensus::JustificationSyncLink, Proof, > { - pub(crate) build: Option>, - pub(crate) algorithm: Algorithm, - pub(crate) block_import: BoxBlockImport>, - pub(crate) justification_sync_link: L, + version: Arc, + algorithm: Arc, + justification_sync_link: Arc, + build: Arc>>>, + block_import: Arc>>>, } -impl MiningWorker +impl MiningHandle where Block: BlockT, C: sp_api::ProvideRuntimeApi, @@ -83,35 +98,65 @@ where L: sc_consensus::JustificationSyncLink, sp_api::TransactionFor: Send + 'static, { - /// Get the current best hash. `None` if the worker has just started or the client is doing - /// major syncing. - pub fn best_hash(&self) -> Option { - self.build.as_ref().map(|b| b.metadata.best_hash) + fn increment_version(&self) { + self.version.fetch_add(1, Ordering::SeqCst); } - pub(crate) fn on_major_syncing(&mut self) { - self.build = None; + pub(crate) fn new( + algorithm: Algorithm, + block_import: BoxBlockImport>, + justification_sync_link: L, + ) -> Self { + Self { + version: Arc::new(AtomicUsize::new(0)), + algorithm: Arc::new(algorithm), + justification_sync_link: Arc::new(justification_sync_link), + build: Arc::new(Mutex::new(None)), + block_import: Arc::new(Mutex::new(block_import)), + } } - pub(crate) fn on_build(&mut self, build: MiningBuild) { - self.build = Some(build); + pub(crate) fn on_major_syncing(&self) { + let mut build = self.build.lock(); + *build = None; + self.increment_version(); + } + + pub(crate) fn on_build(&self, value: MiningBuild) { + let mut build = self.build.lock(); + *build = Some(value); + self.increment_version(); + } + + /// Get the version of the mining worker. + /// + /// This returns type `Version` which can only compare equality. 
If `Version` is unchanged, then + /// it can be certain that `best_hash` and `metadata` were not changed. + pub fn version(&self) -> Version { + Version(self.version.load(Ordering::SeqCst)) + } + + /// Get the current best hash. `None` if the worker has just started or the client is doing + /// major syncing. + pub fn best_hash(&self) -> Option { + self.build.lock().as_ref().map(|b| b.metadata.best_hash) } /// Get a copy of the current mining metadata, if available. pub fn metadata(&self) -> Option> { - self.build.as_ref().map(|b| b.metadata.clone()) + self.build.lock().as_ref().map(|b| b.metadata.clone()) } /// Submit a mined seal. The seal will be validated again. Returns true if the submission is /// successful. - pub async fn submit(&mut self, seal: Seal) -> bool { - if let Some(build) = self.build.take() { + pub async fn submit(&self, seal: Seal) -> bool { + if let Some(metadata) = self.metadata() { match self.algorithm.verify( - &BlockId::Hash(build.metadata.best_hash), - &build.metadata.pre_hash, - build.metadata.pre_runtime.as_ref().map(|v| &v[..]), + &BlockId::Hash(metadata.best_hash), + &metadata.pre_hash, + metadata.pre_runtime.as_ref().map(|v| &v[..]), &seal, - build.metadata.difficulty, + metadata.difficulty, ) { Ok(true) => (), Ok(false) => { @@ -130,55 +175,92 @@ where return false }, } + } else { + warn!( + target: "pow", + "Unable to import mined block: metadata does not exist", + ); + return false + } - let seal = DigestItem::Seal(POW_ENGINE_ID, seal); - let (header, body) = build.proposal.block.deconstruct(); - - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(seal); - import_block.body = Some(body); - import_block.state_action = - StateAction::ApplyChanges(StorageChanges::Changes(build.proposal.storage_changes)); - - let intermediate = PowIntermediate:: { - difficulty: Some(build.metadata.difficulty), - }; - - import_block - .intermediates - .insert(Cow::from(INTERMEDIATE_KEY), 
Box::new(intermediate) as Box<_>); - - let header = import_block.post_header(); - match self.block_import.import_block(import_block, HashMap::default()).await { - Ok(res) => { - res.handle_justification( - &header.hash(), - *header.number(), - &mut self.justification_sync_link, - ); - - info!( - target: "pow", - "✅ Successfully mined block on top of: {}", - build.metadata.best_hash - ); - true - }, - Err(err) => { - warn!( - target: "pow", - "Unable to import mined block: {:?}", - err, - ); - false - }, + let build = if let Some(build) = { + let mut build = self.build.lock(); + let value = build.take(); + if value.is_some() { + self.increment_version(); } + value + } { + build } else { warn!( target: "pow", "Unable to import mined block: build does not exist", ); - false + return false + }; + + let seal = DigestItem::Seal(POW_ENGINE_ID, seal); + let (header, body) = build.proposal.block.deconstruct(); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(seal); + import_block.body = Some(body); + import_block.state_action = + StateAction::ApplyChanges(StorageChanges::Changes(build.proposal.storage_changes)); + + let intermediate = PowIntermediate:: { + difficulty: Some(build.metadata.difficulty), + }; + + import_block + .intermediates + .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); + + let header = import_block.post_header(); + let mut block_import = self.block_import.lock(); + + match block_import.import_block(import_block, HashMap::default()).await { + Ok(res) => { + res.handle_justification( + &header.hash(), + *header.number(), + &self.justification_sync_link, + ); + + info!( + target: "pow", + "✅ Successfully mined block on top of: {}", + build.metadata.best_hash + ); + true + }, + Err(err) => { + warn!( + target: "pow", + "Unable to import mined block: {:?}", + err, + ); + false + }, + } + } +} + +impl Clone for MiningHandle +where + Block: BlockT, + Algorithm: PowAlgorithm, + 
C: sp_api::ProvideRuntimeApi, + L: sc_consensus::JustificationSyncLink, +{ + fn clone(&self) -> Self { + Self { + version: self.version.clone(), + algorithm: self.algorithm.clone(), + justification_sync_link: self.justification_sync_link.clone(), + build: self.build.clone(), + block_import: self.block_import.clone(), } } } diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 4c0142829bb5c..6c1f865c62cb0 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -6,7 +6,7 @@ description = "Generic slots-based utilities for consensus" edition = "2018" build = "build.rs" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/client/consensus/uncles/Cargo.toml b/client/consensus/uncles/Cargo.toml index 7e821db197b3c..73768f0d09411 100644 --- a/client/consensus/uncles/Cargo.toml +++ b/client/consensus/uncles/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Generic uncle inclusion utilities for consensus" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index f9a81d6ce7ea2..53af082d3b91d 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Client backend that uses RocksDB database as storage." 
readme = "README.md" diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 66adb64c0109e..3b8936c0f7bac 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -502,6 +502,11 @@ impl BlockchainDb { } } + fn update_block_gap(&self, gap: Option<(NumberFor, NumberFor)>) { + let mut meta = self.meta.write(); + meta.block_gap = gap; + } + // Get block changes trie root, if available. fn changes_trie_root(&self, block: BlockId) -> ClientResult> { self.header(block).map(|header| { @@ -538,6 +543,7 @@ impl sc_client_api::blockchain::HeaderBackend for Blockcha finalized_number: meta.finalized_number, finalized_state: meta.finalized_state.clone(), number_leaves: self.leaves.read().count(), + block_gap: meta.block_gap, } } @@ -1070,13 +1076,14 @@ impl FrozenForDuration { F: FnOnce() -> T, { let mut lock = self.value.lock(); - if lock.at.elapsed() > self.duration || lock.value.is_none() { + let now = std::time::Instant::now(); + if now.saturating_duration_since(lock.at) > self.duration || lock.value.is_none() { let new_value = f(); - lock.at = std::time::Instant::now(); + lock.at = now; lock.value = Some(new_value.clone()); new_value } else { - lock.value.as_ref().expect("checked with lock above").clone() + lock.value.as_ref().expect("Checked with in branch above; qed").clone() } } } @@ -1388,9 +1395,10 @@ impl Backend { operation.apply_offchain(&mut transaction); let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len()); - let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash; - let mut last_finalized_num = self.blockchain.meta.read().finalized_number; - let best_num = self.blockchain.meta.read().best_number; + let (best_num, mut last_finalized_hash, mut last_finalized_num, mut block_gap) = { + let meta = self.blockchain.meta.read(); + (meta.best_number, meta.finalized_hash, meta.finalized_number, meta.block_gap.clone()) + }; let mut changes_trie_cache_ops = None; for (block, justification) in operation.finalized_blocks { 
@@ -1639,6 +1647,41 @@ impl Backend { children, ); } + + if let Some((mut start, end)) = block_gap { + if number == start { + start += One::one(); + utils::insert_number_to_key_mapping( + &mut transaction, + columns::KEY_LOOKUP, + number, + hash, + )?; + } + if start > end { + transaction.remove(columns::META, meta_keys::BLOCK_GAP); + block_gap = None; + debug!(target: "db", "Removed block gap."); + } else { + block_gap = Some((start, end)); + debug!(target: "db", "Update block gap. {:?}", block_gap); + transaction.set( + columns::META, + meta_keys::BLOCK_GAP, + &(start, end).encode(), + ); + } + } else if number > best_num + One::one() && + number > One::one() && self + .blockchain + .header(BlockId::hash(parent_hash))? + .is_none() + { + let gap = (best_num + One::one(), number - One::one()); + transaction.set(columns::META, meta_keys::BLOCK_GAP, &gap.encode()); + block_gap = Some(gap); + debug!(target: "db", "Detected block gap {:?}", block_gap); + } } meta_updates.push(MetaUpdate { @@ -1716,6 +1759,7 @@ impl Backend { for m in meta_updates { self.blockchain.update_meta(m); } + self.blockchain.update_block_gap(block_gap); Ok(()) } diff --git a/client/db/src/light.rs b/client/db/src/light.rs index bf2da5c61d058..48cf0489cf2a0 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -157,6 +157,7 @@ where None }, number_leaves: 1, + block_gap: None, } } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index a895324a2e7b9..5fef0e5b12d08 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -1418,6 +1418,7 @@ mod qc { #[derive(Debug, Clone)] struct Node { hash: H256, + #[allow(unused)] parent: H256, state: KeyMap, changes: KeySet, diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index ea22c774f463e..0e895eaaf3851 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -54,6 +54,8 @@ pub mod meta_keys { pub const FINALIZED_BLOCK: &[u8; 5] = b"final"; /// Last finalized state 
key. pub const FINALIZED_STATE: &[u8; 6] = b"fstate"; + /// Block gap. + pub const BLOCK_GAP: &[u8; 3] = b"gap"; /// Meta information prefix for list-based caches. pub const CACHE_META_PREFIX: &[u8; 5] = b"cache"; /// Meta information for changes tries key. @@ -81,6 +83,8 @@ pub struct Meta { pub genesis_hash: H, /// Finalized state, if any pub finalized_state: Option<(H, N)>, + /// Block gap, start and end inclusive, if any. + pub block_gap: Option<(N, N)>, } /// A block lookup key: used for canonical lookup from block number to hash @@ -527,6 +531,7 @@ where finalized_number: Zero::zero(), genesis_hash: Default::default(), finalized_state: None, + block_gap: None, }), }; @@ -541,7 +546,7 @@ where "Opened blockchain db, fetched {} = {:?} ({})", desc, hash, - header.number() + header.number(), ); Ok((hash, *header.number())) } else { @@ -558,6 +563,10 @@ where } else { None }; + let block_gap = db + .get(COLUMN_META, meta_keys::BLOCK_GAP) + .and_then(|d| Decode::decode(&mut d.as_slice()).ok()); + debug!(target: "db", "block_gap={:?}", block_gap); Ok(Meta { best_hash, @@ -566,6 +575,7 @@ where finalized_number, genesis_hash, finalized_state, + block_gap, }) } diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index b7e2595b8e169..c8246d43a0f89 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "A crate that provides means of executing/dispatching calls into the runtime." 
documentation = "https://docs.rs/sc-executor" @@ -36,14 +36,14 @@ libsecp256k1 = "0.6" [dev-dependencies] wat = "1.0" -hex-literal = "0.3.1" +hex-literal = "0.3.3" sc-runtime-test = { version = "2.0.0", path = "runtime-test" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } sp-maybe-compressed-blob = { version = "4.0.0-dev", path = "../../primitives/maybe-compressed-blob" } sc-tracing = { version = "4.0.0-dev", path = "../tracing" } -tracing = "0.1.25" +tracing = "0.1.29" tracing-subscriber = "0.2.19" paste = "1.0" regex = "1" diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index c4fc8c27f7544..622baa6c0dcab 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "A set of common definitions that are needed for defining execution engines." documentation = "https://docs.rs/sc-executor-common/" diff --git a/client/executor/common/src/runtime_blob/globals_snapshot.rs b/client/executor/common/src/runtime_blob/globals_snapshot.rs index 6a29ff8bae365..a25fa6f9fd639 100644 --- a/client/executor/common/src/runtime_blob/globals_snapshot.rs +++ b/client/executor/common/src/runtime_blob/globals_snapshot.rs @@ -34,14 +34,14 @@ pub trait InstanceGlobals { /// Get a handle to a global by it's export name. /// /// The requested export is must exist in the exported list, and it should be a mutable global. 
- fn get_global(&self, export_name: &str) -> Self::Global; + fn get_global(&mut self, export_name: &str) -> Self::Global; /// Get the current value of the global. - fn get_global_value(&self, global: &Self::Global) -> sp_wasm_interface::Value; + fn get_global_value(&mut self, global: &Self::Global) -> sp_wasm_interface::Value; /// Update the current value of the global. /// /// The global behind the handle is guaranteed to be mutable and the value to be the same type /// as the global. - fn set_global_value(&self, global: &Self::Global, value: sp_wasm_interface::Value); + fn set_global_value(&mut self, global: &Self::Global, value: sp_wasm_interface::Value); } /// A set of exposed mutable globals. @@ -79,7 +79,10 @@ impl GlobalsSnapshot { /// /// This function panics if the instance doesn't correspond to the module from which the /// [`ExposedMutableGlobalsSet`] was collected. - pub fn take(mutable_globals: &ExposedMutableGlobalsSet, instance: &Instance) -> Self + pub fn take( + mutable_globals: &ExposedMutableGlobalsSet, + instance: &mut Instance, + ) -> Self where Instance: InstanceGlobals, { @@ -98,7 +101,7 @@ impl GlobalsSnapshot { /// Apply the snapshot to the given instance. /// /// This instance must be the same that was used for creation of this snapshot. 
- pub fn apply(&self, instance: &Instance) + pub fn apply(&self, instance: &mut Instance) where Instance: InstanceGlobals, { diff --git a/client/executor/common/src/util.rs b/client/executor/common/src/util.rs index 3ea29540f98ee..ffbeb8c7ab533 100644 --- a/client/executor/common/src/util.rs +++ b/client/executor/common/src/util.rs @@ -233,7 +233,7 @@ pub mod wasmer { let range = checked_range(dest_addr.into(), source.len(), destination.len()) .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; - &mut destination[range].copy_from_slice(source); + destination[range].copy_from_slice(source); Ok(()) } } diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs index eb73909d9234f..1e9f1225518a3 100644 --- a/client/executor/common/src/wasm_runtime.rs +++ b/client/executor/common/src/wasm_runtime.rs @@ -78,21 +78,21 @@ pub trait WasmInstance: Send { /// Before execution, instance is reset. /// /// Returns the encoded result on success. - fn call(&self, method: InvokeMethod, data: &[u8]) -> Result, Error>; + fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error>; /// Call an exported method on this WASM instance. /// /// Before execution, instance is reset. /// /// Returns the encoded result on success. - fn call_export(&self, method: &str, data: &[u8]) -> Result, Error> { + fn call_export(&mut self, method: &str, data: &[u8]) -> Result, Error> { self.call(method.into(), data) } /// Get the value from a global with the given `name`. /// /// This method is only suitable for getting immutable globals. - fn get_global_const(&self, name: &str) -> Result, Error>; + fn get_global_const(&mut self, name: &str) -> Result, Error>; /// **Testing Only**. This function returns the base address of the linear memory. 
/// diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index a4fbc88cf5662..ac1e3413491d9 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" build = "build.rs" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" publish = false -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index c9f7d6b1e2970..2b5699fa3f77a 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -91,7 +91,7 @@ sp_core::wasm_export_functions! { // This function dirties the **host** pages. I.e. we dirty 4KiB at a time and it will take // 16 writes to process a single wasm page. - let mut heap_ptr = heap_base as usize; + let heap_ptr = heap_base as usize; // Find the next wasm page boundary. let heap_ptr = round_up_to(heap_ptr, 65536); @@ -234,7 +234,7 @@ sp_core::wasm_export_functions! { match instance.get_global_val("test_global") { Some(sp_sandbox::Value::I64(val)) => val, None => 30, - val => 40, + _ => 40, } } @@ -362,7 +362,7 @@ sp_core::wasm_export_functions! { // It is expected that the given pointer is not allocated. 
fn check_and_set_in_heap(heap_base: u32, offset: u32) { let test_message = b"Hello invalid heap memory"; - let ptr = unsafe { (heap_base + offset) as *mut u8 }; + let ptr = (heap_base + offset) as *mut u8; let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; diff --git a/client/executor/src/integration_tests/linux.rs b/client/executor/src/integration_tests/linux.rs index 7e0696973dc77..38e57707e9e6b 100644 --- a/client/executor/src/integration_tests/linux.rs +++ b/client/executor/src/integration_tests/linux.rs @@ -40,7 +40,7 @@ fn memory_consumption_compiled() { let runtime = mk_test_runtime(WasmExecutionMethod::Compiled, 1024); - let instance = runtime.new_instance().unwrap(); + let mut instance = runtime.new_instance().unwrap(); let heap_base = instance .get_global_const("__heap_base") .expect("`__heap_base` is valid") diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index dabead4799dc8..fe964f47ba374 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -467,7 +467,7 @@ test_wasm_execution!(returns_mutable_static); fn returns_mutable_static(wasm_method: WasmExecutionMethod) { let runtime = mk_test_runtime(wasm_method, 1024); - let instance = runtime.new_instance().unwrap(); + let mut instance = runtime.new_instance().unwrap(); let res = instance.call_export("returns_mutable_static", &[0]).unwrap(); assert_eq!(33, u64::decode(&mut &res[..]).unwrap()); @@ -482,7 +482,7 @@ test_wasm_execution!(returns_mutable_static_bss); fn returns_mutable_static_bss(wasm_method: WasmExecutionMethod) { let runtime = mk_test_runtime(wasm_method, 1024); - let instance = runtime.new_instance().unwrap(); + let mut instance = runtime.new_instance().unwrap(); let res = instance.call_export("returns_mutable_static_bss", &[0]).unwrap(); assert_eq!(1, u64::decode(&mut &res[..]).unwrap()); @@ -508,7 +508,7 @@ fn 
restoration_of_globals(wasm_method: WasmExecutionMethod) { const REQUIRED_MEMORY_PAGES: u64 = 32; let runtime = mk_test_runtime(wasm_method, REQUIRED_MEMORY_PAGES); - let instance = runtime.new_instance().unwrap(); + let mut instance = runtime.new_instance().unwrap(); // On the first invocation we allocate approx. 768KB (75%) of stack and then trap. let res = instance.call_export("allocates_huge_stack_array", &true.encode()); @@ -522,7 +522,7 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) { test_wasm_execution!(interpreted_only heap_is_reset_between_calls); fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { let runtime = mk_test_runtime(wasm_method, 1024); - let instance = runtime.new_instance().unwrap(); + let mut instance = runtime.new_instance().unwrap(); let heap_base = instance .get_global_const("__heap_base") diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 38dba55b5f87c..d912fc0fd13c9 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -101,8 +101,6 @@ pub struct WasmExecutor { host_functions: Arc>, /// WASM runtime cache. cache: Arc, - /// The size of the instances cache. - max_runtime_instances: usize, /// The path to a directory which the executor can leverage for a file cache, e.g. put there /// compiled artifacts. cache_path: Option, @@ -138,7 +136,6 @@ impl WasmExecutor { default_heap_pages: default_heap_pages.unwrap_or(DEFAULT_HEAP_PAGES), host_functions: Arc::new(host_functions), cache: Arc::new(RuntimeCache::new(max_runtime_instances, cache_path.clone())), - max_runtime_instances, cache_path, } } @@ -166,7 +163,7 @@ impl WasmExecutor { where F: FnOnce( AssertUnwindSafe<&Arc>, - AssertUnwindSafe<&dyn WasmInstance>, + AssertUnwindSafe<&mut dyn WasmInstance>, Option<&RuntimeVersion>, AssertUnwindSafe<&mut dyn Externalities>, ) -> Result>, @@ -192,7 +189,7 @@ impl WasmExecutor { /// Perform a call into the given runtime. 
/// - /// The runtime is passed as a [`RuntimeBlob`]. The runtime will be isntantiated with the + /// The runtime is passed as a [`RuntimeBlob`]. The runtime will be instantiated with the /// parameters this `WasmExecutor` was initialized with. /// /// In case of problems with during creation of the runtime or instantation, a `Err` is @@ -220,7 +217,7 @@ impl WasmExecutor { .new_instance() .map_err(|e| format!("Failed to create instance: {:?}", e))?; - let instance = AssertUnwindSafe(instance); + let mut instance = AssertUnwindSafe(instance); let mut ext = AssertUnwindSafe(ext); let module = AssertUnwindSafe(module); @@ -250,7 +247,7 @@ impl sp_core::traits::ReadRuntimeVersion for WasmExecutor { } // If the blob didn't have embedded runtime version section, we fallback to the legacy - // way of fetching the verison: i.e. instantiating the given instance and calling + // way of fetching the version: i.e. instantiating the given instance and calling // `Core_version` on it. self.uncached_call( @@ -286,7 +283,7 @@ impl CodeExecutor for WasmExecutor { runtime_code, ext, false, - |module, instance, _onchain_version, mut ext| { + |module, mut instance, _onchain_version, mut ext| { with_externalities_safe(&mut **ext, move || { preregister_builtin_ext(module.clone()); instance.call_export(method, data).map(NativeOrEncoded::Encoded) @@ -441,7 +438,7 @@ impl RuntimeSpawn for RuntimeInstanceSpawn { // pool of instances should be used. 
// // https://github.com/paritytech/substrate/issues/7354 - let instance = + let mut instance = module.new_instance().expect("Failed to create new instance from module"); instance @@ -528,7 +525,7 @@ impl CodeExecutor for NativeElseWasmExecut runtime_code, ext, false, - |module, instance, onchain_version, mut ext| { + |module, mut instance, onchain_version, mut ext| { let onchain_version = onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index b11e3958dbc81..c7aa1200719de 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -76,7 +76,7 @@ impl VersionedRuntime { where F: FnOnce( &Arc, - &dyn WasmInstance, + &mut dyn WasmInstance, Option<&RuntimeVersion>, &mut dyn Externalities, ) -> Result, @@ -90,12 +90,12 @@ impl VersionedRuntime { match instance { Some((index, mut locked)) => { - let (instance, new_inst) = locked + let (mut instance, new_inst) = locked .take() .map(|r| Ok((r, false))) .unwrap_or_else(|| self.module.new_instance().map(|i| (i, true)))?; - let result = f(&self.module, &*instance, self.version.as_ref(), ext); + let result = f(&self.module, &mut *instance, self.version.as_ref(), ext); if let Err(e) = &result { if new_inst { log::warn!( @@ -129,9 +129,9 @@ impl VersionedRuntime { log::warn!(target: "wasm-runtime", "Ran out of free WASM instances"); // Allocate a new instance - let instance = self.module.new_instance()?; + let mut instance = self.module.new_instance()?; - f(&self.module, &*instance, self.version.as_ref(), ext) + f(&self.module, &mut *instance, self.version.as_ref(), ext) }, } } @@ -213,7 +213,7 @@ impl RuntimeCache { where F: FnOnce( &Arc, - &dyn WasmInstance, + &mut dyn WasmInstance, Option<&RuntimeVersion>, &mut dyn Externalities, ) -> Result, @@ -304,7 +304,7 @@ pub fn create_wasm_runtime_with_code( // // We drop the cache_path here to silence warnings that cache_path is not used 
if // compiling without the `wasmtime` flag. - drop(cache_path); + let _ = cache_path; sc_executor_wasmi::create_runtime( blob, @@ -318,14 +318,15 @@ pub fn create_wasm_runtime_with_code( WasmExecutionMethod::Compiled => sc_executor_wasmtime::create_runtime( blob, sc_executor_wasmtime::Config { - heap_pages: heap_pages as u32, - max_memory_pages: None, + heap_pages, + max_memory_size: None, allow_missing_func_imports, cache_path: cache_path.map(ToOwned::to_owned), semantics: sc_executor_wasmtime::Semantics { fast_instance_reuse: true, deterministic_stack_limit: None, canonicalize_nans: false, + parallel_compilation: true, }, }, host_functions, diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index 324b2bdd0baeb..307ba908e23b6 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "This crate provides an implementation of `WasmRuntime` that is baked by wasmi." documentation = "https://docs.rs/sc-executor-wasmi" diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 6052662fa7ccf..ceab07c2f71cb 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -736,7 +736,7 @@ pub struct WasmiInstance { unsafe impl Send for WasmiInstance {} impl WasmInstance for WasmiInstance { - fn call(&self, method: InvokeMethod, data: &[u8]) -> Result, Error> { + fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result, Error> { // We reuse a single wasm instance for multiple calls and a previous call (if any) // altered the state. Therefore, we need to restore the instance to original state. 
@@ -767,7 +767,7 @@ impl WasmInstance for WasmiInstance { ) } - fn get_global_const(&self, name: &str) -> Result, Error> { + fn get_global_const(&mut self, name: &str) -> Result, Error> { match self.instance.export_by_name(name) { Some(global) => Ok(Some( global diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 3158cdecc3263..9fb76ed08fd9a 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Defines a `WasmRuntime` that uses the Wasmtime JIT to execute." readme = "README.md" @@ -16,7 +16,6 @@ targets = ["x86_64-unknown-linux-gnu"] libc = "0.2.90" cfg-if = "1.0" log = "0.4.8" -scoped-tls = "1.0" parity-wasm = "0.42.0" codec = { package = "parity-scale-codec", version = "2.0.0" } sc-executor-common = { version = "0.10.0-dev", path = "../common" } @@ -24,8 +23,10 @@ sp-wasm-interface = { version = "4.0.0-dev", path = "../../../primitives/wasm-in sp-runtime-interface = { version = "4.0.0-dev", path = "../../../primitives/runtime-interface" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sc-allocator = { version = "4.0.0-dev", path = "../../allocator" } -wasmtime = { version = "0.27.0", default-features = false, features = [ +wasmtime = { version = "0.30.0", default-features = false, features = [ "cache", + "cranelift", + "jitdump", "parallel-compilation", ] } diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index 8453ec3954354..4edb9f9c423f0 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -19,7 +19,7 @@ //! 
This module defines `HostState` and `HostContext` structs which provide logic and state //! required for execution of host. -use crate::instance_wrapper::InstanceWrapper; +use crate::{instance_wrapper::InstanceWrapper, runtime::StoreData}; use codec::{Decode, Encode}; use log::trace; use sc_allocator::FreeingBumpHeapAllocator; @@ -31,7 +31,7 @@ use sc_executor_common::{ use sp_core::sandbox as sandbox_primitives; use sp_wasm_interface::{FunctionContext, MemoryId, Pointer, Sandbox, WordSize}; use std::{cell::RefCell, rc::Rc}; -use wasmtime::{Func, Val}; +use wasmtime::{Caller, Func, Val}; /// The state required to construct a HostContext context. The context only lasts for one host /// call, whereas the state is maintained for the duration of a Wasm runtime call, which may make @@ -64,45 +64,67 @@ impl HostState { } /// Materialize `HostContext` that can be used to invoke a substrate host `dyn Function`. - pub fn materialize<'a>(&'a self) -> HostContext<'a> { - HostContext(self) + pub(crate) fn materialize<'a, 'b, 'c>( + &'a self, + caller: &'b mut Caller<'c, StoreData>, + ) -> HostContext<'a, 'b, 'c> { + HostContext { host_state: self, caller } } } /// A `HostContext` implements `FunctionContext` for making host calls from a Wasmtime /// runtime. The `HostContext` exists only for the lifetime of the call and borrows state from /// a longer-living `HostState`. 
-pub struct HostContext<'a>(&'a HostState); +pub(crate) struct HostContext<'a, 'b, 'c> { + host_state: &'a HostState, + caller: &'b mut Caller<'c, StoreData>, +} -impl<'a> std::ops::Deref for HostContext<'a> { +impl<'a, 'b, 'c> std::ops::Deref for HostContext<'a, 'b, 'c> { type Target = HostState; fn deref(&self) -> &HostState { - self.0 + self.host_state } } -impl<'a> sp_wasm_interface::FunctionContext for HostContext<'a> { +impl<'a, 'b, 'c> sp_wasm_interface::FunctionContext for HostContext<'a, 'b, 'c> { fn read_memory_into( &self, address: Pointer, dest: &mut [u8], ) -> sp_wasm_interface::Result<()> { - self.instance.read_memory_into(address, dest).map_err(|e| e.to_string()) + let ctx = &self.caller; + self.host_state + .instance + .read_memory_into(ctx, address, dest) + .map_err(|e| e.to_string()) } fn write_memory(&mut self, address: Pointer, data: &[u8]) -> sp_wasm_interface::Result<()> { - self.instance.write_memory_from(address, data).map_err(|e| e.to_string()) + let ctx = &mut self.caller; + self.host_state + .instance + .write_memory_from(ctx, address, data) + .map_err(|e| e.to_string()) } fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result> { - self.instance - .allocate(&mut *self.allocator.borrow_mut(), size) + let ctx = &mut self.caller; + let allocator = &self.host_state.allocator; + + self.host_state + .instance + .allocate(ctx, &mut *allocator.borrow_mut(), size) .map_err(|e| e.to_string()) } fn deallocate_memory(&mut self, ptr: Pointer) -> sp_wasm_interface::Result<()> { - self.instance - .deallocate(&mut *self.allocator.borrow_mut(), ptr) + let ctx = &mut self.caller; + let allocator = &self.host_state.allocator; + + self.host_state + .instance + .deallocate(ctx, &mut *allocator.borrow_mut(), ptr) .map_err(|e| e.to_string()) } @@ -111,7 +133,7 @@ impl<'a> sp_wasm_interface::FunctionContext for HostContext<'a> { } } -impl<'a> Sandbox for HostContext<'a> { +impl<'a, 'b, 'c> Sandbox for HostContext<'a, 'b, 'c> { fn memory_get( 
&mut self, memory_id: MemoryId, @@ -129,7 +151,8 @@ impl<'a> Sandbox for HostContext<'a> { Ok(buffer) => buffer, }; - if let Err(_) = self.instance.write_memory_from(buf_ptr, &buffer) { + let instance = self.instance.clone(); + if let Err(_) = instance.write_memory_from(&mut self.caller, buf_ptr, &buffer) { return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS) } @@ -148,7 +171,7 @@ impl<'a> Sandbox for HostContext<'a> { let len = val_len as usize; - let buffer = match self.instance.read_memory(val_ptr, len) { + let buffer = match self.instance.read_memory(&self.caller, val_ptr, len) { Err(_) => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), Ok(buffer) => buffer, }; @@ -241,12 +264,14 @@ impl<'a> Sandbox for HostContext<'a> { ) -> sp_wasm_interface::Result { // Extract a dispatch thunk from the instance's table by the specified index. let dispatch_thunk = { + let ctx = &mut self.caller; let table_item = self + .host_state .instance .table() .as_ref() .ok_or_else(|| "Runtime doesn't have a table; sandbox is unavailable")? - .get(dispatch_thunk_id); + .get(ctx, dispatch_thunk_id); table_item .ok_or_else(|| "dispatch_thunk_id is out of bounds")? 
@@ -295,12 +320,12 @@ impl<'a> Sandbox for HostContext<'a> { } } -struct SandboxContext<'a, 'b> { - host_context: &'a mut HostContext<'b>, +struct SandboxContext<'a, 'b, 'c, 'd> { + host_context: &'a mut HostContext<'b, 'c, 'd>, dispatch_thunk: Func, } -impl<'a, 'b> sandbox::SandboxContext for SandboxContext<'a, 'b> { +impl<'a, 'b, 'c, 'd> sandbox::SandboxContext for SandboxContext<'a, 'b, 'c, 'd> { fn invoke( &mut self, invoke_args_ptr: Pointer, @@ -308,12 +333,16 @@ impl<'a, 'b> sandbox::SandboxContext for SandboxContext<'a, 'b> { state: u32, func_idx: SupervisorFuncIndex, ) -> Result { - let result = self.dispatch_thunk.call(&[ - Val::I32(u32::from(invoke_args_ptr) as i32), - Val::I32(invoke_args_len as i32), - Val::I32(state as i32), - Val::I32(usize::from(func_idx) as i32), - ]); + let result = self.dispatch_thunk.call( + &mut self.host_context.caller, + &[ + Val::I32(u32::from(invoke_args_ptr) as i32), + Val::I32(invoke_args_len as i32), + Val::I32(state as i32), + Val::I32(usize::from(func_idx) as i32), + ], + ); + match result { Ok(ret_vals) => { let ret_val = if ret_vals.len() != 1 { diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index b27fb944bc030..a00ab14263e7f 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -16,13 +16,15 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use crate::{state_holder, util}; +use crate::{ + runtime::{Store, StoreData}, + util, +}; use sc_executor_common::error::WasmError; use sp_wasm_interface::{Function, ValueType}; -use std::any::Any; +use std::{any::Any, convert::TryInto}; use wasmtime::{ - Extern, ExternType, Func, FuncType, ImportType, Limits, Memory, MemoryType, Module, Store, - Trap, Val, + Caller, Extern, ExternType, Func, FuncType, ImportType, Memory, MemoryType, Module, Trap, Val, }; pub struct Imports { @@ -34,11 +36,11 @@ pub struct Imports { /// Goes over all imports of a module and prepares a vector of `Extern`s that can be used for /// instantiation of the module. Returns an error if there are imports that cannot be satisfied. -pub fn resolve_imports( - store: &Store, +pub(crate) fn resolve_imports( + store: &mut Store, module: &Module, host_functions: &[&'static dyn Function], - heap_pages: u32, + heap_pages: u64, allow_missing_func_imports: bool, ) -> Result { let mut externs = vec![]; @@ -78,9 +80,9 @@ fn import_name<'a, 'b: 'a>(import: &'a ImportType<'b>) -> Result<&'a str, WasmEr } fn resolve_memory_import( - store: &Store, + store: &mut Store, import_ty: &ImportType, - heap_pages: u32, + heap_pages: u64, ) -> Result { let requested_memory_ty = match import_ty.ty() { ExternType::Memory(memory_ty) => memory_ty, @@ -94,8 +96,8 @@ fn resolve_memory_import( // Increment the min (a.k.a initial) number of pages by `heap_pages` and check if it exceeds the // maximum specified by the import. 
- let initial = requested_memory_ty.limits().min().saturating_add(heap_pages); - if let Some(max) = requested_memory_ty.limits().max() { + let initial = requested_memory_ty.minimum().saturating_add(heap_pages); + if let Some(max) = requested_memory_ty.maximum() { if initial > max { return Err(WasmError::Other(format!( "incremented number of pages by heap_pages (total={}) is more than maximum requested\ @@ -106,7 +108,27 @@ fn resolve_memory_import( } } - let memory_ty = MemoryType::new(Limits::new(initial, requested_memory_ty.limits().max())); + // Note that the return value of `maximum` and `minimum`, while a u64, + // will always fit into a u32 for 32-bit memories. + // 64-bit memories are part of the memory64 proposal for WebAssembly which is not standardized + // yet. + let minimum: u32 = initial.try_into().map_err(|_| { + WasmError::Other(format!( + "minimum number of memory pages ({}) doesn't fit into u32", + initial + )) + })?; + let maximum: Option = match requested_memory_ty.maximum() { + Some(max) => Some(max.try_into().map_err(|_| { + WasmError::Other(format!( + "maximum number of memory pages ({}) doesn't fit into u32", + max + )) + })?), + None => None, + }; + + let memory_ty = MemoryType::new(minimum, maximum); let memory = Memory::new(store, memory_ty).map_err(|e| { WasmError::Other(format!( "failed to create a memory during resolving of memory import: {}", @@ -117,7 +139,7 @@ fn resolve_memory_import( } fn resolve_func_import( - store: &Store, + store: &mut Store, import_ty: &ImportType, host_functions: &[&'static dyn Function], allow_missing_func_imports: bool, @@ -162,19 +184,27 @@ struct HostFuncHandler { host_func: &'static dyn Function, } -fn call_static( +fn call_static<'a>( static_func: &'static dyn Function, wasmtime_params: &[Val], wasmtime_results: &mut [Val], + mut caller: Caller<'a, StoreData>, ) -> Result<(), wasmtime::Trap> { - let unwind_result = state_holder::with_context(|host_ctx| { - let mut host_ctx = host_ctx.expect( - "host 
functions can be called only from wasm instance; - wasm instance is always called initializing context; - therefore host_ctx cannot be None; - qed - ", - ); + let unwind_result = { + let host_state = caller + .data() + .host_state() + .expect( + "host functions can be called only from wasm instance; + wasm instance is always called initializing context; + therefore host_ctx cannot be None; + qed + ", + ) + .clone(); + + let mut host_ctx = host_state.materialize(&mut caller); + // `from_wasmtime_val` panics if it encounters a value that doesn't fit into the values // available in substrate. // @@ -185,7 +215,7 @@ fn call_static( std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { static_func.execute(&mut host_ctx, &mut params) })) - }); + }; let execution_result = match unwind_result { Ok(execution_result) => execution_result, @@ -219,11 +249,11 @@ impl HostFuncHandler { Self { host_func } } - fn into_extern(self, store: &Store) -> Extern { + fn into_extern(self, store: &mut Store) -> Extern { let host_func = self.host_func; let func_ty = wasmtime_func_sig(self.host_func); - let func = Func::new(store, func_ty, move |_, params, result| { - call_static(host_func, params, result) + let func = Func::new(store, func_ty, move |caller, params, result| { + call_static(host_func, params, result, caller) }); Extern::Func(func) } @@ -243,7 +273,7 @@ impl MissingHostFuncHandler { }) } - fn into_extern(self, store: &Store, func_ty: &FuncType) -> Extern { + fn into_extern(self, store: &mut Store, func_ty: &FuncType) -> Extern { let Self { module, name } = self; let func = Func::new(store, func_ty.clone(), move |_, _, _| { Err(Trap::new(format!("call to a missing function {}:{}", module, name))) diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index f66d62f673d90..2b8508ee2b07f 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -19,20 +19,18 
@@ //! Defines data and logic needed for interaction with an WebAssembly instance of a substrate //! runtime module. -use crate::{ - imports::Imports, - util::{from_wasmtime_val, into_wasmtime_val}, -}; +use crate::imports::Imports; use sc_executor_common::{ error::{Error, Result}, - runtime_blob, util::checked_range, wasm_runtime::InvokeMethod, }; use sp_wasm_interface::{Pointer, Value, WordSize}; -use std::{marker, slice}; -use wasmtime::{Extern, Func, Global, Instance, Memory, Module, Store, Table, Val}; +use std::marker; +use wasmtime::{ + AsContext, AsContextMut, Extern, Func, Global, Instance, Memory, Module, Table, Val, +}; /// Invoked entrypoint format. pub enum EntryPointType { @@ -58,7 +56,12 @@ pub struct EntryPoint { impl EntryPoint { /// Call this entry point. - pub fn call(&self, data_ptr: Pointer, data_len: WordSize) -> Result { + pub fn call( + &self, + ctx: impl AsContextMut, + data_ptr: Pointer, + data_len: WordSize, + ) -> Result { let data_ptr = u32::from(data_ptr); let data_len = u32::from(data_len); @@ -68,15 +71,18 @@ impl EntryPoint { match self.call_type { EntryPointType::Direct { ref entrypoint } => - entrypoint.call((data_ptr, data_len)).map_err(handle_trap), + entrypoint.call(ctx, (data_ptr, data_len)).map_err(handle_trap), EntryPointType::Wrapped { func, ref dispatcher } => - dispatcher.call((func, data_ptr, data_len)).map_err(handle_trap), + dispatcher.call(ctx, (func, data_ptr, data_len)).map_err(handle_trap), } } - pub fn direct(func: wasmtime::Func) -> std::result::Result { + pub fn direct( + func: wasmtime::Func, + ctx: impl AsContext, + ) -> std::result::Result { let entrypoint = func - .typed::<(u32, u32), u64>() + .typed::<(u32, u32), u64, _>(ctx) .map_err(|_| "Invalid signature for direct entry point")? 
.clone(); Ok(Self { call_type: EntryPointType::Direct { entrypoint } }) @@ -85,9 +91,10 @@ impl EntryPoint { pub fn wrapped( dispatcher: wasmtime::Func, func: u32, + ctx: impl AsContext, ) -> std::result::Result { let dispatcher = dispatcher - .typed::<(u32, u32, u32), u64>() + .typed::<(u32, u32, u32), u64, _>(ctx) .map_err(|_| "Invalid signature for wrapped entry point")? .clone(); Ok(Self { call_type: EntryPointType::Wrapped { func, dispatcher } }) @@ -144,8 +151,13 @@ fn extern_func(extern_: &Extern) -> Option<&Func> { impl InstanceWrapper { /// Create a new instance wrapper from the given wasm module. - pub fn new(store: &Store, module: &Module, imports: &Imports, heap_pages: u32) -> Result { - let instance = Instance::new(store, module, &imports.externs) + pub fn new( + module: &Module, + imports: &Imports, + heap_pages: u64, + mut ctx: impl AsContextMut, + ) -> Result { + let instance = Instance::new(&mut ctx, module, &imports.externs) .map_err(|e| Error::from(format!("cannot instantiate: {}", e)))?; let memory = match imports.memory_import_index { @@ -153,51 +165,55 @@ impl InstanceWrapper { .expect("only memory can be at the `memory_idx`; qed") .clone(), None => { - let memory = get_linear_memory(&instance)?; - if !memory.grow(heap_pages).is_ok() { - return Err("failed to increase the linear memory size".into()) + let memory = get_linear_memory(&instance, &mut ctx)?; + if !memory.grow(&mut ctx, heap_pages).is_ok() { + return Err("failed to increase the linear memory size".into()) } memory }, }; - Ok(Self { - table: get_table(&instance), - instance, - memory, - _not_send_nor_sync: marker::PhantomData, - }) + let table = get_table(&instance, ctx); + + Ok(Self { table, instance, memory, _not_send_nor_sync: marker::PhantomData }) } /// Resolves a substrate entrypoint by the given name. /// /// An entrypoint must have a signature `(i32, i32) -> i64`, otherwise this function will return /// an error.
- pub fn resolve_entrypoint(&self, method: InvokeMethod) -> Result { + pub fn resolve_entrypoint( + &self, + method: InvokeMethod, + mut ctx: impl AsContextMut, + ) -> Result { Ok(match method { InvokeMethod::Export(method) => { // Resolve the requested method and verify that it has a proper signature. - let export = self.instance.get_export(method).ok_or_else(|| { + let export = self.instance.get_export(&mut ctx, method).ok_or_else(|| { Error::from(format!("Exported method {} is not found", method)) })?; let func = extern_func(&export) .ok_or_else(|| Error::from(format!("Export {} is not a function", method)))? .clone(); - EntryPoint::direct(func).map_err(|_| { + EntryPoint::direct(func, ctx).map_err(|_| { Error::from(format!("Exported function '{}' has invalid signature.", method)) })? }, InvokeMethod::Table(func_ref) => { - let table = - self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; - let val = table.get(func_ref).ok_or(Error::NoTableEntryWithIndex(func_ref))?; + let table = self + .instance + .get_table(&mut ctx, "__indirect_function_table") + .ok_or(Error::NoTable)?; + let val = + table.get(&mut ctx, func_ref).ok_or(Error::NoTableEntryWithIndex(func_ref))?; let func = val .funcref() .ok_or(Error::TableElementIsNotAFunction(func_ref))? .ok_or(Error::FunctionRefIsNull(func_ref))? .clone(); - EntryPoint::direct(func).map_err(|_| { + EntryPoint::direct(func, ctx).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for direct call.", func_ref, @@ -205,10 +221,12 @@ impl InstanceWrapper { })? 
}, InvokeMethod::TableWithWrapper { dispatcher_ref, func } => { - let table = - self.instance.get_table("__indirect_function_table").ok_or(Error::NoTable)?; + let table = self + .instance + .get_table(&mut ctx, "__indirect_function_table") + .ok_or(Error::NoTable)?; let val = table - .get(dispatcher_ref) + .get(&mut ctx, dispatcher_ref) .ok_or(Error::NoTableEntryWithIndex(dispatcher_ref))?; let dispatcher = val .funcref() @@ -216,7 +234,7 @@ impl InstanceWrapper { .ok_or(Error::FunctionRefIsNull(dispatcher_ref))? .clone(); - EntryPoint::wrapped(dispatcher, func).map_err(|_| { + EntryPoint::wrapped(dispatcher, func, ctx).map_err(|_| { Error::from(format!( "Function @{} in exported table has invalid signature for wrapped call.", dispatcher_ref, @@ -234,17 +252,17 @@ impl InstanceWrapper { /// Reads `__heap_base: i32` global variable and returns it. /// /// If it doesn't exist, not a global or of not i32 type returns an error. - pub fn extract_heap_base(&self) -> Result { + pub fn extract_heap_base(&self, mut ctx: impl AsContextMut) -> Result { let heap_base_export = self .instance - .get_export("__heap_base") + .get_export(&mut ctx, "__heap_base") .ok_or_else(|| Error::from("__heap_base is not found"))?; let heap_base_global = extern_global(&heap_base_export) .ok_or_else(|| Error::from("__heap_base is not a global"))?; let heap_base = heap_base_global - .get() + .get(&mut ctx) .i32() .ok_or_else(|| Error::from("__heap_base is not a i32"))?; @@ -252,15 +270,15 @@ impl InstanceWrapper { } /// Get the value from a global with the given `name`. 
- pub fn get_global_val(&self, name: &str) -> Result> { - let global = match self.instance.get_export(name) { + pub fn get_global_val(&self, mut ctx: impl AsContextMut, name: &str) -> Result> { + let global = match self.instance.get_export(&mut ctx, name) { Some(global) => global, None => return Ok(None), }; let global = extern_global(&global).ok_or_else(|| format!("`{}` is not a global", name))?; - match global.get() { + match global.get(ctx) { Val::I32(val) => Ok(Some(Value::I32(val))), Val::I64(val) => Ok(Some(Value::I64(val))), Val::F32(val) => Ok(Some(Value::F32(val))), @@ -268,12 +286,17 @@ impl InstanceWrapper { _ => Err("Unknown value type".into()), } } + + /// Get a global with the given `name`. + pub fn get_global(&self, ctx: impl AsContextMut, name: &str) -> Option { + self.instance.get_global(ctx, name) + } } /// Extract linear memory instance from the given instance. -fn get_linear_memory(instance: &Instance) -> Result { +fn get_linear_memory(instance: &Instance, ctx: impl AsContextMut) -> Result { let memory_export = instance - .get_export("memory") + .get_export(ctx, "memory") .ok_or_else(|| Error::from("memory is not exported under `memory` name"))?; let memory = extern_memory(&memory_export) @@ -284,9 +307,9 @@ fn get_linear_memory(instance: &Instance) -> Result { } /// Extract the table from the given instance if any. -fn get_table(instance: &Instance) -> Option { +fn get_table(instance: &Instance, ctx: impl AsContextMut) -> Option
{ instance - .get_export("__indirect_function_table") + .get_export(ctx, "__indirect_function_table") .as_ref() .and_then(extern_table) .cloned() @@ -297,12 +320,17 @@ impl InstanceWrapper { /// Read data from a slice of memory into a newly allocated buffer. /// /// Returns an error if the read would go out of the memory bounds. - pub fn read_memory(&self, source_addr: Pointer, size: usize) -> Result> { - let range = checked_range(source_addr.into(), size, self.memory.data_size()) + pub fn read_memory( + &self, + ctx: impl AsContext, + source_addr: Pointer, + size: usize, + ) -> Result> { + let range = checked_range(source_addr.into(), size, self.memory.data_size(&ctx)) .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; let mut buffer = vec![0; range.len()]; - self.read_memory_into(source_addr, &mut buffer)?; + self.read_memory_into(ctx, source_addr, &mut buffer)?; Ok(buffer) } @@ -310,33 +338,35 @@ impl InstanceWrapper { /// Read data from the instance memory into a slice. /// /// Returns an error if the read would go out of the memory bounds. - pub fn read_memory_into(&self, source_addr: Pointer, dest: &mut [u8]) -> Result<()> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice(); - - let range = checked_range(source_addr.into(), dest.len(), memory.len()) - .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; - dest.copy_from_slice(&memory[range]); - Ok(()) - } + pub fn read_memory_into( + &self, + ctx: impl AsContext, + address: Pointer, + dest: &mut [u8], + ) -> Result<()> { + let memory = self.memory.data(ctx.as_context()); + + let range = checked_range(address.into(), dest.len(), memory.len()) + .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; + dest.copy_from_slice(&memory[range]); + Ok(()) } /// Write data to the instance memory from a slice. 
/// /// Returns an error if the write would go out of the memory bounds. - pub fn write_memory_from(&self, dest_addr: Pointer, data: &[u8]) -> Result<()> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice_mut(); - - let range = checked_range(dest_addr.into(), data.len(), memory.len()) - .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; - memory[range].copy_from_slice(data); - Ok(()) - } + pub fn write_memory_from( + &self, + mut ctx: impl AsContextMut, + address: Pointer, + data: &[u8], + ) -> Result<()> { + let memory = self.memory.data_mut(ctx.as_context_mut()); + + let range = checked_range(address.into(), data.len(), memory.len()) + .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; + memory[range].copy_from_slice(data); + Ok(()) } /// Allocate some memory of the given size. Returns pointer to the allocated memory region. @@ -345,16 +375,13 @@ impl InstanceWrapper { /// to get more details. pub fn allocate( &self, + mut ctx: impl AsContextMut, allocator: &mut sc_allocator::FreeingBumpHeapAllocator, size: WordSize, ) -> Result> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice_mut(); + let memory = self.memory.data_mut(ctx.as_context_mut()); - allocator.allocate(memory, size).map_err(Into::into) - } + allocator.allocate(memory, size).map_err(Into::into) } /// Deallocate the memory pointed by the given pointer. @@ -362,64 +389,25 @@ impl InstanceWrapper { /// Returns `Err` in case the given memory region cannot be deallocated. 
pub fn deallocate( &self, + mut ctx: impl AsContextMut, allocator: &mut sc_allocator::FreeingBumpHeapAllocator, ptr: Pointer, ) -> Result<()> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice_mut(); + let memory = self.memory.data_mut(ctx.as_context_mut()); - allocator.deallocate(memory, ptr).map_err(Into::into) - } - } - - /// Returns linear memory of the wasm instance as a slice. - /// - /// # Safety - /// - /// Wasmtime doesn't provide comprehensive documentation about the exact behavior of the data - /// pointer. If a dynamic style heap is used the base pointer of the heap can change. Since - /// growing, we cannot guarantee the lifetime of the returned slice reference. - unsafe fn memory_as_slice(&self) -> &[u8] { - let ptr = self.memory.data_ptr() as *const _; - let len = self.memory.data_size(); - - if len == 0 { - &[] - } else { - slice::from_raw_parts(ptr, len) - } - } - - /// Returns linear memory of the wasm instance as a slice. - /// - /// # Safety - /// - /// See `[memory_as_slice]`. In addition to those requirements, since a mutable reference is - /// returned it must be ensured that only one mutable and no shared references to memory exists - /// at the same time. - unsafe fn memory_as_slice_mut(&self) -> &mut [u8] { - let ptr = self.memory.data_ptr(); - let len = self.memory.data_size(); - - if len == 0 { - &mut [] - } else { - slice::from_raw_parts_mut(ptr, len) - } + allocator.deallocate(memory, ptr).map_err(Into::into) } /// Returns the pointer to the first byte of the linear memory for this instance. - pub fn base_ptr(&self) -> *const u8 { - self.memory.data_ptr() + pub fn base_ptr(&self, ctx: impl AsContext) -> *const u8 { + self.memory.data_ptr(ctx) } /// Removes physical backing from the allocated linear memory. This leads to returning the /// memory back to the system. 
While the memory is zeroed this is considered as a side-effect /// and is not relied upon. Thus this function acts as a hint. - pub fn decommit(&self) { - if self.memory.data_size() == 0 { + pub fn decommit(&self, ctx: impl AsContext) { + if self.memory.data_size(&ctx) == 0 { return } @@ -428,8 +416,8 @@ impl InstanceWrapper { use std::sync::Once; unsafe { - let ptr = self.memory.data_ptr(); - let len = self.memory.data_size(); + let ptr = self.memory.data_ptr(&ctx); + let len = self.memory.data_size(ctx); // Linux handles MADV_DONTNEED reliably. The result is that the given area // is unmapped and will be zeroed on the next pagefault. @@ -447,23 +435,3 @@ impl InstanceWrapper { } } } - -impl runtime_blob::InstanceGlobals for InstanceWrapper { - type Global = wasmtime::Global; - - fn get_global(&self, export_name: &str) -> Self::Global { - self.instance - .get_global(export_name) - .expect("get_global is guaranteed to be called with an export name of a global; qed") - } - - fn get_global_value(&self, global: &Self::Global) -> Value { - from_wasmtime_val(global.get()) - } - - fn set_global_value(&self, global: &Self::Global, value: Value) { - global.set(into_wasmtime_val(value)).expect( - "the value is guaranteed to be of the same value; the global is guaranteed to be mutable; qed", - ); - } -} diff --git a/client/executor/wasmtime/src/lib.rs b/client/executor/wasmtime/src/lib.rs index 62b0b205f6de6..e0d6a262afda9 100644 --- a/client/executor/wasmtime/src/lib.rs +++ b/client/executor/wasmtime/src/lib.rs @@ -16,12 +16,21 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -/// ! Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. +//! Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. +//! +//! You can choose a profiling strategy at runtime with +//! environment variable `WASMTIME_PROFILING_STRATEGY`: +//! +//! | `WASMTIME_PROFILING_STRATEGY` | Effect | +//! 
|-------------|-------------------------| +//! | undefined | No profiling | +//! | `"jitdump"` | jitdump profiling | +//! | other value | No profiling (warning) | + mod host; mod imports; mod instance_wrapper; mod runtime; -mod state_holder; mod util; #[cfg(test)] diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index f6878ec5ee6e1..bd113c3383838 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -22,13 +22,15 @@ use crate::{ host::HostState, imports::{resolve_imports, Imports}, instance_wrapper::{EntryPoint, InstanceWrapper}, - state_holder, + util, }; use sc_allocator::FreeingBumpHeapAllocator; use sc_executor_common::{ error::{Error, Result}, - runtime_blob::{DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob}, + runtime_blob::{ + self, DataSegmentsSnapshot, ExposedMutableGlobalsSet, GlobalsSnapshot, RuntimeBlob, + }, wasm_runtime::{InvokeMethod, WasmInstance, WasmModule}, }; use sp_runtime_interface::unpack_ptr_and_len; @@ -36,9 +38,29 @@ use sp_wasm_interface::{Function, Pointer, Value, WordSize}; use std::{ path::{Path, PathBuf}, rc::Rc, - sync::Arc, + sync::{ + atomic::{AtomicBool, Ordering}, + Arc, + }, }; -use wasmtime::{Engine, Store}; +use wasmtime::{AsContext, AsContextMut, Engine, StoreLimits}; + +pub(crate) struct StoreData { + /// The limits we apply to the store. We need to store it here to return a reference to this + /// object when we have the limits enabled. + limits: StoreLimits, + /// This will only be set when we call into the runtime. + host_state: Option>, +} + +impl StoreData { + /// Returns a reference to the host state.
+ pub fn host_state(&self) -> Option<&Rc> { + self.host_state.as_ref() + } +} + +pub(crate) type Store = wasmtime::Store; enum Strategy { FastInstanceReuse { @@ -46,6 +68,7 @@ enum Strategy { globals_snapshot: GlobalsSnapshot, data_segments_snapshot: Arc, heap_base: u32, + store: Store, }, RecreateInstance(InstanceCreator), } @@ -54,12 +77,37 @@ struct InstanceCreator { store: Store, module: Arc, imports: Arc, - heap_pages: u32, + heap_pages: u64, } impl InstanceCreator { - fn instantiate(&self) -> Result { - InstanceWrapper::new(&self.store, &*self.module, &*self.imports, self.heap_pages) + fn instantiate(&mut self) -> Result { + InstanceWrapper::new(&*self.module, &*self.imports, self.heap_pages, &mut self.store) + } +} + +struct InstanceGlobals<'a, C> { + ctx: &'a mut C, + instance: &'a InstanceWrapper, +} + +impl<'a, C: AsContextMut> runtime_blob::InstanceGlobals for InstanceGlobals<'a, C> { + type Global = wasmtime::Global; + + fn get_global(&mut self, export_name: &str) -> Self::Global { + self.instance + .get_global(&mut self.ctx, export_name) + .expect("get_global is guaranteed to be called with an export name of a global; qed") + } + + fn get_global_value(&mut self, global: &Self::Global) -> Value { + util::from_wasmtime_val(global.get(&mut self.ctx)) + } + + fn set_global_value(&mut self, global: &Self::Global, value: Value) { + global.set(&mut self.ctx, util::into_wasmtime_val(value)).expect( + "the value is guaranteed to be of the same value; the global is guaranteed to be mutable; qed", + ); } } @@ -82,19 +130,25 @@ pub struct WasmtimeRuntime { impl WasmtimeRuntime { /// Creates the store respecting the set limits. 
fn new_store(&self) -> Store { - match self.config.max_memory_pages { - Some(max_memory_pages) => Store::new_with_limits( - &self.engine, - wasmtime::StoreLimitsBuilder::new().memory_pages(max_memory_pages).build(), - ), - None => Store::new(&self.engine), + let limits = if let Some(max_memory_size) = self.config.max_memory_size { + wasmtime::StoreLimitsBuilder::new().memory_size(max_memory_size).build() + } else { + Default::default() + }; + + let mut store = Store::new(&self.engine, StoreData { limits, host_state: None }); + + if self.config.max_memory_size.is_some() { + store.limiter(|s| &mut s.limits); } + + store } } impl WasmModule for WasmtimeRuntime { fn new_instance(&self) -> Result> { - let store = self.new_store(); + let mut store = self.new_store(); // Scan all imports, find the matching host functions, and create stubs that adapt arguments // and results. @@ -103,7 +157,7 @@ impl WasmModule for WasmtimeRuntime { // However, I am not sure if that's a good idea since it would be pushing our luck // further by assuming that `Store` not only `Send` but also `Sync`. let imports = resolve_imports( - &store, + &mut store, &self.module, &self.host_functions, self.config.heap_pages, @@ -112,21 +166,24 @@ impl WasmModule for WasmtimeRuntime { let strategy = if let Some(ref snapshot_data) = self.snapshot_data { let instance_wrapper = - InstanceWrapper::new(&store, &self.module, &imports, self.config.heap_pages)?; - let heap_base = instance_wrapper.extract_heap_base()?; + InstanceWrapper::new(&self.module, &imports, self.config.heap_pages, &mut store)?; + let heap_base = instance_wrapper.extract_heap_base(&mut store)?; // This function panics if the instance was created from a runtime blob different from // which the mutable globals were collected. Here, it is easy to see that there is only // a single runtime blob and thus it's the same that was used for both creating the // instance and collecting the mutable globals. 
- let globals_snapshot = - GlobalsSnapshot::take(&snapshot_data.mutable_globals, &instance_wrapper); + let globals_snapshot = GlobalsSnapshot::take( + &snapshot_data.mutable_globals, + &mut InstanceGlobals { ctx: &mut store, instance: &instance_wrapper }, + ); Strategy::FastInstanceReuse { instance_wrapper: Rc::new(instance_wrapper), globals_snapshot, data_segments_snapshot: snapshot_data.data_segments_snapshot.clone(), heap_base, + store, } } else { Strategy::RecreateInstance(InstanceCreator { @@ -152,48 +209,63 @@ pub struct WasmtimeInstance { unsafe impl Send for WasmtimeInstance {} impl WasmInstance for WasmtimeInstance { - fn call(&self, method: InvokeMethod, data: &[u8]) -> Result> { - match &self.strategy { + fn call(&mut self, method: InvokeMethod, data: &[u8]) -> Result> { + match &mut self.strategy { Strategy::FastInstanceReuse { instance_wrapper, globals_snapshot, data_segments_snapshot, heap_base, + ref mut store, } => { - let entrypoint = instance_wrapper.resolve_entrypoint(method)?; + let entrypoint = instance_wrapper.resolve_entrypoint(method, &mut *store)?; data_segments_snapshot.apply(|offset, contents| { - instance_wrapper.write_memory_from(Pointer::new(offset), contents) + instance_wrapper.write_memory_from(&mut *store, Pointer::new(offset), contents) })?; - globals_snapshot.apply(&**instance_wrapper); + globals_snapshot + .apply(&mut InstanceGlobals { ctx: &mut *store, instance: &*instance_wrapper }); let allocator = FreeingBumpHeapAllocator::new(*heap_base); - let result = - perform_call(data, Rc::clone(&instance_wrapper), entrypoint, allocator); + let result = perform_call( + &mut *store, + data, + instance_wrapper.clone(), + entrypoint, + allocator, + ); // Signal to the OS that we are done with the linear memory and that it can be // reclaimed. 
- instance_wrapper.decommit(); + instance_wrapper.decommit(&store); result }, - Strategy::RecreateInstance(instance_creator) => { + Strategy::RecreateInstance(ref mut instance_creator) => { let instance_wrapper = instance_creator.instantiate()?; - let heap_base = instance_wrapper.extract_heap_base()?; - let entrypoint = instance_wrapper.resolve_entrypoint(method)?; + let heap_base = instance_wrapper.extract_heap_base(&mut instance_creator.store)?; + let entrypoint = + instance_wrapper.resolve_entrypoint(method, &mut instance_creator.store)?; let allocator = FreeingBumpHeapAllocator::new(heap_base); - perform_call(data, Rc::new(instance_wrapper), entrypoint, allocator) + perform_call( + &mut instance_creator.store, + data, + Rc::new(instance_wrapper), + entrypoint, + allocator, + ) }, } } - fn get_global_const(&self, name: &str) -> Result> { - match &self.strategy { - Strategy::FastInstanceReuse { instance_wrapper, .. } => - instance_wrapper.get_global_val(name), - Strategy::RecreateInstance(instance_creator) => - instance_creator.instantiate()?.get_global_val(name), + fn get_global_const(&mut self, name: &str) -> Result> { + match &mut self.strategy { + Strategy::FastInstanceReuse { instance_wrapper, ref mut store, .. } => + instance_wrapper.get_global_val(&mut *store, name), + Strategy::RecreateInstance(ref mut instance_creator) => instance_creator + .instantiate()? + .get_global_val(&mut instance_creator.store, name), } } @@ -204,8 +276,8 @@ impl WasmInstance for WasmtimeInstance { // associated with it. None }, - Strategy::FastInstanceReuse { instance_wrapper, .. } => - Some(instance_wrapper.base_ptr()), + Strategy::FastInstanceReuse { instance_wrapper, store, .. 
} => + Some(instance_wrapper.base_ptr(&store)), } } } @@ -253,6 +325,23 @@ fn common_config(semantics: &Semantics) -> std::result::Result wasmtime::ProfilingStrategy::JitDump, + None => wasmtime::ProfilingStrategy::None, + Some(_) => { + // Remember if we have already logged a warning due to an unknown profiling strategy. + static UNKNOWN_PROFILING_STRATEGY: AtomicBool = AtomicBool::new(false); + // Make sure that the warning will not be relogged regularly. + if !UNKNOWN_PROFILING_STRATEGY.swap(true, Ordering::Relaxed) { + log::warn!("WASMTIME_PROFILING_STRATEGY is set to unknown value, ignored."); + } + wasmtime::ProfilingStrategy::None + }, + }; + config + .profiler(profiler) + .map_err(|e| WasmError::Instantiation(format!("fail to set profiler: {}", e)))?; + if let Some(DeterministicStackLimit { native_stack_max, .. }) = semantics.deterministic_stack_limit { @@ -261,6 +350,8 @@ fn common_config(semantics: &Semantics) -> std::result::Result std::result::Result, + pub max_memory_size: Option, /// The WebAssembly standard requires all imports of an instantiated module to be resolved, - /// othewise, the instantiation fails. If this option is set to `true`, then this behavior is + /// otherwise, the instantiation fails. If this option is set to `true`, then this behavior is /// overriden and imports that are requested by the module and not provided by the host /// functions will be resolved using stubs. These stubs will trap upon a call. 
pub allow_missing_func_imports: bool, @@ -536,40 +631,50 @@ pub fn prepare_runtime_artifact( } fn perform_call( + mut ctx: impl AsContextMut, data: &[u8], instance_wrapper: Rc, entrypoint: EntryPoint, mut allocator: FreeingBumpHeapAllocator, ) -> Result> { - let (data_ptr, data_len) = inject_input_data(&instance_wrapper, &mut allocator, data)?; + let (data_ptr, data_len) = + inject_input_data(&mut ctx, &instance_wrapper, &mut allocator, data)?; let host_state = HostState::new(allocator, instance_wrapper.clone()); - let ret = state_holder::with_initialized_state(&host_state, || -> Result<_> { - Ok(unpack_ptr_and_len(entrypoint.call(data_ptr, data_len)?)) - }); + + // Set the host state before calling into wasm. + ctx.as_context_mut().data_mut().host_state = Some(Rc::new(host_state)); + + let ret = entrypoint.call(&mut ctx, data_ptr, data_len).map(unpack_ptr_and_len); + + // Reset the host state + ctx.as_context_mut().data_mut().host_state = None; + let (output_ptr, output_len) = ret?; - let output = extract_output_data(&instance_wrapper, output_ptr, output_len)?; + let output = extract_output_data(ctx, &instance_wrapper, output_ptr, output_len)?; Ok(output) } fn inject_input_data( + mut ctx: impl AsContextMut, instance: &InstanceWrapper, allocator: &mut FreeingBumpHeapAllocator, data: &[u8], ) -> Result<(Pointer, WordSize)> { let data_len = data.len() as WordSize; - let data_ptr = instance.allocate(allocator, data_len)?; - instance.write_memory_from(data_ptr, data)?; + let data_ptr = instance.allocate(&mut ctx, allocator, data_len)?; + instance.write_memory_from(ctx, data_ptr, data)?; Ok((data_ptr, data_len)) } fn extract_output_data( + ctx: impl AsContext, instance: &InstanceWrapper, output_ptr: u32, output_len: u32, ) -> Result> { let mut output = vec![0; output_len as usize]; - instance.read_memory_into(Pointer::new(output_ptr), &mut output)?; + instance.read_memory_into(ctx, Pointer::new(output_ptr), &mut output)?; Ok(output) } diff --git 
a/client/executor/wasmtime/src/state_holder.rs b/client/executor/wasmtime/src/state_holder.rs deleted file mode 100644 index 0e2684cd25130..0000000000000 --- a/client/executor/wasmtime/src/state_holder.rs +++ /dev/null @@ -1,45 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use crate::host::{HostContext, HostState}; - -scoped_tls::scoped_thread_local!(static HOST_STATE: HostState); - -/// Provide `HostState` for the runtime method call and execute the given function `f`. -/// -/// During the execution of the provided function `with_context` will be callable. -pub fn with_initialized_state(s: &HostState, f: F) -> R -where - F: FnOnce() -> R, -{ - HOST_STATE.set(s, f) -} - -/// Create a `HostContext` from the contained `HostState` and execute the given function `f`. -/// -/// This function is only callable within closure passed to `init_state`. Otherwise, the passed -/// context will be `None`. 
-pub fn with_context(f: F) -> R -where - F: FnOnce(Option) -> R, -{ - if !HOST_STATE.is_set() { - return f(None) - } - HOST_STATE.with(|state| f(Some(state.materialize()))) -} diff --git a/client/executor/wasmtime/src/tests.rs b/client/executor/wasmtime/src/tests.rs index 366352d7f5c39..261afba0c6bc9 100644 --- a/client/executor/wasmtime/src/tests.rs +++ b/client/executor/wasmtime/src/tests.rs @@ -28,8 +28,8 @@ struct RuntimeBuilder { fast_instance_reuse: bool, canonicalize_nans: bool, deterministic_stack: bool, - heap_pages: u32, - max_memory_pages: Option, + heap_pages: u64, + max_memory_size: Option, } impl RuntimeBuilder { @@ -42,7 +42,7 @@ impl RuntimeBuilder { canonicalize_nans: false, deterministic_stack: false, heap_pages: 1024, - max_memory_pages: None, + max_memory_size: None, } } @@ -58,8 +58,8 @@ impl RuntimeBuilder { self.deterministic_stack = deterministic_stack; } - fn max_memory_pages(&mut self, max_memory_pages: Option) { - self.max_memory_pages = max_memory_pages; + fn max_memory_size(&mut self, max_memory_size: Option) { + self.max_memory_size = max_memory_size; } fn build(self) -> Arc { @@ -82,7 +82,7 @@ impl RuntimeBuilder { blob, crate::Config { heap_pages: self.heap_pages, - max_memory_pages: self.max_memory_pages, + max_memory_size: self.max_memory_size, allow_missing_func_imports: true, cache_path: None, semantics: crate::Semantics { @@ -95,6 +95,7 @@ impl RuntimeBuilder { false => None, }, canonicalize_nans: self.canonicalize_nans, + parallel_compilation: true, }, }, { @@ -116,7 +117,7 @@ fn test_nan_canonicalization() { builder.build() }; - let instance = runtime.new_instance().expect("failed to instantiate a runtime"); + let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); /// A NaN with canonical payload bits. 
const CANONICAL_NAN_BITS: u32 = 0x7fc00000; @@ -159,7 +160,7 @@ fn test_stack_depth_reaching() { builder.deterministic_stack(true); builder.build() }; - let instance = runtime.new_instance().expect("failed to instantiate a runtime"); + let mut instance = runtime.new_instance().expect("failed to instantiate a runtime"); let err = instance.call_export("test-many-locals", &[]).unwrap_err(); @@ -171,20 +172,22 @@ fn test_stack_depth_reaching() { #[test] fn test_max_memory_pages() { fn try_instantiate( - max_memory_pages: Option, + max_memory_size: Option, wat: &'static str, ) -> Result<(), Box> { let runtime = { let mut builder = RuntimeBuilder::new_on_demand(); builder.use_wat(wat); - builder.max_memory_pages(max_memory_pages); + builder.max_memory_size(max_memory_size); builder.build() }; - let instance = runtime.new_instance()?; + let mut instance = runtime.new_instance()?; let _ = instance.call_export("main", &[])?; Ok(()) } + const WASM_PAGE_SIZE: usize = 65536; + // check the old behavior is preserved. That is, if no limit is set we allow 4 GiB of memory. try_instantiate( None, @@ -213,9 +216,9 @@ fn test_max_memory_pages() { // max is not specified, therefore it's implied to be 65536 pages (4 GiB). // - // max_memory_pages = 1 (initial) + 1024 (heap_pages) + // max_memory_size = (1 (initial) + 1024 (heap_pages)) * WASM_PAGE_SIZE try_instantiate( - Some(1 + 1024), + Some((1 + 1024) * WASM_PAGE_SIZE), r#" (module @@ -233,7 +236,7 @@ fn test_max_memory_pages() { // max is specified explicitly to 2048 pages. try_instantiate( - Some(1 + 1024), + Some((1 + 1024) * WASM_PAGE_SIZE), r#" (module @@ -251,7 +254,7 @@ fn test_max_memory_pages() { // memory grow should work as long as it doesn't exceed 1025 pages in total. try_instantiate( - Some(0 + 1024 + 25), + Some((0 + 1024 + 25) * WASM_PAGE_SIZE), r#" (module (import "env" "memory" (memory 0)) ;; <- zero starting pages.
@@ -280,7 +283,7 @@ fn test_max_memory_pages() { // We start with 1025 pages and try to grow at least one. try_instantiate( - Some(1 + 1024), + Some((1 + 1024) * WASM_PAGE_SIZE), r#" (module (import "env" "memory" (memory 1)) ;; <- initial=1, meaning after heap pages mount the diff --git a/client/finality-grandpa-warp-sync/Cargo.toml b/client/finality-grandpa-warp-sync/Cargo.toml index a444125fdfa11..9736b25ccac55 100644 --- a/client/finality-grandpa-warp-sync/Cargo.toml +++ b/client/finality-grandpa-warp-sync/Cargo.toml @@ -5,7 +5,7 @@ version = "0.10.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 7fdd91e557ab7..2d45fa100f79d 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Integration of the GRANDPA finality gadget into substrate." 
documentation = "https://docs.rs/sc-finality-grandpa" @@ -23,7 +23,7 @@ futures-timer = "3.0.1" log = "0.4.8" parking_lot = "0.11.1" rand = "0.8.4" -parity-scale-codec = { version = "2.0.0", features = ["derive"] } +parity-scale-codec = { version = "2.3.1", features = ["derive"] } sp-application-crypto = { version = "4.0.0-dev", path = "../../primitives/application-crypto" } sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index 6e5dfdd05e624..6eb13099aa202 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -168,7 +168,7 @@ pub struct AuthoritySet { /// Track at which blocks the set id changed. This is useful when we need to prove finality for /// a given block since we can figure out what set the block belongs to and when the set /// started/ended. - authority_set_changes: AuthoritySetChanges, + pub(crate) authority_set_changes: AuthoritySetChanges, } impl AuthoritySet @@ -714,6 +714,17 @@ impl AuthoritySetChanges { } } + pub(crate) fn insert(&mut self, block_number: N) { + let idx = self + .0 + .binary_search_by_key(&block_number, |(_, n)| n.clone()) + .unwrap_or_else(|b| b); + + let set_id = if idx == 0 { 0 } else { self.0[idx - 1].0 + 1 }; + assert!(idx == self.0.len() || self.0[idx].0 != set_id); + self.0.insert(idx, (set_id, block_number)); + } + /// Returns an iterator over all historical authority set changes starting at the given block /// number (excluded). The iterator yields a tuple representing the set id and the block number /// of the last block in that set. 
@@ -1632,6 +1643,18 @@ mod tests { assert_eq!(authorities.pending_forced_changes.first().unwrap().canon_hash, "D"); } + #[test] + fn authority_set_changes_insert() { + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(0, 41); + authority_set_changes.append(1, 81); + authority_set_changes.append(4, 121); + + authority_set_changes.insert(101); + assert_eq!(authority_set_changes.get_set_id(100), AuthoritySetChangeId::Set(2, 101)); + assert_eq!(authority_set_changes.get_set_id(101), AuthoritySetChangeId::Set(2, 101)); + } + #[test] fn authority_set_changes_for_complete_data() { let mut authority_set_changes = AuthoritySetChanges::empty(); diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index f27a530ed2f40..c79698902e975 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -1165,7 +1165,7 @@ where debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); let result = match select_chain.finality_target(block, None).await { - Ok(Some(best_hash)) => { + Ok(best_hash) => { let best_header = client .header(BlockId::Hash(best_hash))? 
.expect("Header known to exist after `finality_target` call; qed"); @@ -1223,10 +1223,6 @@ where }) .or_else(|| Some((target_header.hash(), *target_header.number()))) }, - Ok(None) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); - None - }, Err(e) => { debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); None diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index f663bfe94afdf..d54f7234b44b4 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -118,10 +118,10 @@ where ) .await } else { - Ok(Some(pending_change.canon_hash)) + Ok(pending_change.canon_hash) }; - if let Ok(Some(hash)) = effective_block_hash { + if let Ok(hash) = effective_block_hash { if let Ok(Some(header)) = self.inner.header(BlockId::Hash(hash)) { if *header.number() == pending_change.effective_number() { out.push((header.hash(), *header.number())); @@ -551,6 +551,32 @@ where return self.import_state(block, new_cache).await } + if number <= self.inner.info().finalized_number { + // Importing an old block. Just save justifications and authority set changes + if self.check_new_change(&block.header, hash).is_some() { + if block.justifications.is_none() { + return Err(ConsensusError::ClientImport( + "Justification required when importing \ + an old block with authority set change." + .into(), + )) + } + assert!(block.justifications.is_some()); + let mut authority_set = self.authority_set.inner_locked(); + authority_set.authority_set_changes.insert(number); + crate::aux_schema::update_authority_set::( + &authority_set, + None, + |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }, + ); + } + return (&*self.inner).import_block(block, new_cache).await + } + // on initial sync we will restrict logging under info to avoid spam. 
let initial_sync = block.origin == BlockOrigin::NetworkInitialSync; diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index b974afe0d352e..7c8d94d970f86 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -80,8 +80,14 @@ where } /// A custom voting rule that guarantees that our vote is always behind the best -/// block by at least N blocks. In the best case our vote is exactly N blocks -/// behind the best block. +/// block by at least N blocks, unless the base number is < N blocks behind the +/// best, in which case it votes for the base. +/// +/// In the best case our vote is exactly N blocks +/// behind the best block, but if there is a scenario where either +/// >34% of validators run without this rule or the fork-choice rule +/// can prioritize shorter chains over longer ones, the vote may be +/// closer to the best block than N. #[derive(Clone)] pub struct BeforeBestBlockBy(N); impl VotingRule for BeforeBestBlockBy> @@ -92,7 +98,7 @@ where fn restrict_vote( &self, backend: Arc, - _base: &Block::Header, + base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, ) -> VotingRuleResult { @@ -102,6 +108,12 @@ where return Box::pin(async { None }) } + // Constrain to the base number, if that's the minimal + // vote that can be placed. + if *base.number() + self.0 > *best_target.number() { + return Box::pin(std::future::ready(Some((base.hash(), *base.number())))) + } + // find the target number restricted by this rule let target_number = best_target.number().saturating_sub(self.0); @@ -393,4 +405,34 @@ mod tests { // only one of the rules is applied. 
assert_eq!(number, 150); } + + #[test] + fn before_best_by_has_cutoff_at_base() { + let rule = BeforeBestBlockBy(2); + + let mut client = Arc::new(TestClientBuilder::new().build()); + + for _ in 0..5 { + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + + futures::executor::block_on(client.import(BlockOrigin::Own, block)).unwrap(); + } + + let best = client.header(&BlockId::Hash(client.info().best_hash)).unwrap().unwrap(); + let best_number = best.number().clone(); + + for i in 0u32..5 { + let base = client.header(&BlockId::Number(i.into())).unwrap().unwrap(); + let (_, number) = futures::executor::block_on(rule.restrict_vote( + client.clone(), + &base, + &best, + &best, + )) + .unwrap(); + + let expected = std::cmp::max(best_number - 2, *base.number()); + assert_eq!(number, expected, "best = {}, lag = 2, base = {}", best_number, i); + } + } } diff --git a/client/finality-grandpa/src/warp_proof.rs b/client/finality-grandpa/src/warp_proof.rs index 34eaa49cdf360..3c1fa4892f8a3 100644 --- a/client/finality-grandpa/src/warp_proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -31,7 +31,7 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor, One}, }; -use std::sync::Arc; +use std::{collections::HashMap, sync::Arc}; /// Warp proof processing error. 
#[derive(Debug, derive_more::Display, derive_more::From)] @@ -194,6 +194,7 @@ impl WarpSyncProof { &self, set_id: SetId, authorities: AuthorityList, + hard_forks: &HashMap<(Block::Hash, NumberFor), (SetId, AuthorityList)>, ) -> Result<(SetId, AuthorityList), Error> where NumberFor: BlockNumberOps, @@ -202,26 +203,34 @@ impl WarpSyncProof { let mut current_authorities = authorities; for (fragment_num, proof) in self.proofs.iter().enumerate() { - proof - .justification - .verify(current_set_id, ¤t_authorities) - .map_err(|err| Error::InvalidProof(err.to_string()))?; - - if proof.justification.target().1 != proof.header.hash() { - return Err(Error::InvalidProof( - "Mismatch between header and justification".to_owned(), - )) - } + let hash = proof.header.hash(); + let number = *proof.header.number(); + + if let Some((set_id, list)) = hard_forks.get(&(hash.clone(), number)) { + current_set_id = *set_id; + current_authorities = list.clone(); + } else { + proof + .justification + .verify(current_set_id, ¤t_authorities) + .map_err(|err| Error::InvalidProof(err.to_string()))?; + + if proof.justification.target().1 != hash { + return Err(Error::InvalidProof( + "Mismatch between header and justification".to_owned(), + )) + } - if let Some(scheduled_change) = find_scheduled_change::(&proof.header) { - current_authorities = scheduled_change.next_authorities; - current_set_id += 1; - } else if fragment_num != self.proofs.len() - 1 || !self.is_finished { - // Only the last fragment of the last proof message is allowed to be missing - // the authority set change. 
- return Err(Error::InvalidProof( - "Header is missing authority set change digest".to_string(), - )) + if let Some(scheduled_change) = find_scheduled_change::(&proof.header) { + current_authorities = scheduled_change.next_authorities; + current_set_id += 1; + } else if fragment_num != self.proofs.len() - 1 || !self.is_finished { + // Only the last fragment of the last proof message is allowed to be missing the + // authority set change. + return Err(Error::InvalidProof( + "Header is missing authority set change digest".to_string(), + )) + } } } Ok((current_set_id, current_authorities)) @@ -235,6 +244,7 @@ where { backend: Arc, authority_set: SharedAuthoritySet>, + hard_forks: HashMap<(Block::Hash, NumberFor), (SetId, AuthorityList)>, } impl> NetworkProvider @@ -245,8 +255,13 @@ where pub fn new( backend: Arc, authority_set: SharedAuthoritySet>, + hard_forks: Vec<(SetId, (Block::Hash, NumberFor), AuthorityList)>, ) -> Self { - NetworkProvider { backend, authority_set } + NetworkProvider { + backend, + authority_set, + hard_forks: hard_forks.into_iter().map(|(s, hn, list)| (hn, (s, list))).collect(), + } } } @@ -283,7 +298,7 @@ where .map(|p| p.header.clone()) .ok_or_else(|| "Empty proof".to_string())?; let (next_set_id, next_authorities) = - proof.verify(set_id, authorities).map_err(Box::new)?; + proof.verify(set_id, authorities, &self.hard_forks).map_err(Box::new)?; if proof.is_finished { Ok(VerificationResult::::Complete(next_set_id, next_authorities, last_header)) } else { @@ -417,7 +432,8 @@ mod tests { WarpSyncProof::generate(&*backend, genesis_hash, &authority_set_changes).unwrap(); // verifying the proof should yield the last set id and authorities - let (new_set_id, new_authorities) = warp_sync_proof.verify(0, genesis_authorities).unwrap(); + let (new_set_id, new_authorities) = + warp_sync_proof.verify(0, genesis_authorities, &Default::default()).unwrap(); let expected_authorities = current_authorities .iter() diff --git a/client/informant/Cargo.toml 
b/client/informant/Cargo.toml index 88d02f81ad5b3..c28dd4e011945 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Substrate informant." edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 1f23856101aa3..6496172b80891 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -20,7 +20,7 @@ use crate::OutputFormat; use ansi_term::Colour; use log::info; use sc_client_api::ClientInfo; -use sc_network::{NetworkStatus, SyncState}; +use sc_network::{NetworkStatus, SyncState, WarpSyncPhase, WarpSyncProgress}; use sp_runtime::traits::{Block as BlockT, CheckedDiv, NumberFor, Saturating, Zero}; use std::{ convert::{TryFrom, TryInto}, @@ -97,11 +97,17 @@ impl InformantDisplay { net_status.state_sync, net_status.warp_sync, ) { + ( + _, + _, + _, + Some(WarpSyncProgress { phase: WarpSyncPhase::DownloadingBlocks(n), .. 
}), + ) => ("⏩", "Block history".into(), format!(", #{}", n)), (_, _, _, Some(warp)) => ( "⏩", "Warping".into(), format!( - ", {}, ({:.2}) Mib", + ", {}, {:.2} Mib", warp.phase, (warp.total_bytes as f32) / (1024f32 * 1024f32) ), @@ -110,7 +116,7 @@ impl InformantDisplay { "⚙️ ", "Downloading state".into(), format!( - ", {}%, ({:.2}) Mib", + ", {}%, {:.2} Mib", state.percentage, (state.size as f32) / (1024f32 * 1024f32) ), diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 17c651a91decd..74fd85c184c6f 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Keystore (and session key management) for ed25519 based chains like Polkadot." documentation = "https://docs.rs/sc-keystore" diff --git a/client/light/Cargo.toml b/client/light/Cargo.toml index b10f7646bf9bd..cc567c60524a1 100644 --- a/client/light/Cargo.toml +++ b/client/light/Cargo.toml @@ -5,7 +5,7 @@ version = "4.0.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-light" readme = "README.md" diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index c078e5b892fe2..b4907ade834aa 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -5,7 +5,7 @@ version = "0.10.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" 
documentation = "https://docs.rs/sc-network-gossip" readme = "README.md" @@ -23,9 +23,9 @@ lru = "0.6.6" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } sc-network = { version = "0.10.0-dev", path = "../network" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } -tracing = "0.1.25" +tracing = "0.1.29" [dev-dependencies] -async-std = "1.6.5" +async-std = "1.10.0" quickcheck = "1.0.3" substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 55c2fc820637e..5dc40471f138f 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -123,16 +123,7 @@ impl Network for Arc> { } fn remove_set_reserved(&self, who: PeerId, protocol: Cow<'static, str>) { - let addr = - iter::once(multiaddr::Protocol::P2p(who.into())).collect::(); - let result = NetworkService::remove_peers_from_reserved_set( - self, - protocol, - iter::once(addr).collect(), - ); - if let Err(err) = result { - log::error!(target: "gossip", "remove_set_reserved failed: {}", err); - } + NetworkService::remove_peers_from_reserved_set(self, protocol, iter::once(who).collect()); } fn disconnect_peer(&self, who: PeerId, protocol: Cow<'static, str>) { diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 873c2a847a29a..d6d054504369b 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -5,7 +5,7 @@ version = "0.10.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-network" readme = "README.md" @@ -18,7 +18,7 @@ prost-build = "0.8" [dependencies] async-trait = "0.1" -async-std = "1.6.5" +async-std = "1.10.0" 
bitflags = "1.3.2" cid = "0.6.0" bytes = "1" @@ -39,7 +39,7 @@ linked_hash_set = "0.1.3" lru = "0.6.6" log = "0.4.8" parking_lot = "0.11.1" -pin-project = "1.0.4" +pin-project = "1.0.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } prost = "0.8" rand = "0.7.2" @@ -48,7 +48,7 @@ sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-peerset = { version = "4.0.0-dev", path = "../peerset" } serde = { version = "1.0.126", features = ["derive"] } serde_json = "1.0.68" -smallvec = "1.5.0" +smallvec = "1.7.0" sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } @@ -63,7 +63,7 @@ unsigned-varint = { version = "0.6.0", features = [ "asynchronous_codec", ] } void = "1.0.2" -zeroize = "1.4.1" +zeroize = "1.4.2" libp2p = "0.39.1" [dev-dependencies] diff --git a/client/network/src/block_request_handler.rs b/client/network/src/block_request_handler.rs index 9411ca71fd009..3ea7833970d9e 100644 --- a/client/network/src/block_request_handler.rs +++ b/client/network/src/block_request_handler.rs @@ -85,13 +85,14 @@ struct SeenRequestsKey { support_multiple_justifications: bool, } +#[allow(clippy::derive_hash_xor_eq)] impl Hash for SeenRequestsKey { fn hash(&self, state: &mut H) { self.peer.hash(state); self.max_blocks.hash(state); self.direction.hash(state); self.attributes.hash(state); - + self.support_multiple_justifications.hash(state); match self.from { BlockId::Hash(h) => h.hash(state), BlockId::Number(n) => n.hash(state), diff --git a/client/network/src/config.rs b/client/network/src/config.rs index d08e29ef8589f..76c806ccbf7b6 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -155,14 +155,14 @@ pub enum Role { } impl Role { - /// True for `Role::Authority` + /// True for 
[`Role::Authority`]. pub fn is_authority(&self) -> bool { - matches!(self, Role::Authority { .. }) + matches!(self, Self::Authority { .. }) } - /// True for `Role::Light` + /// True for [`Role::Light`]. pub fn is_light(&self) -> bool { - matches!(self, Role::Light { .. }) + matches!(self, Self::Light { .. }) } } @@ -329,7 +329,7 @@ impl FromStr for MultiaddrWithPeerId { fn from_str(s: &str) -> Result { let (peer_id, multiaddr) = parse_str_addr(s)?; - Ok(MultiaddrWithPeerId { peer_id, multiaddr }) + Ok(Self { peer_id, multiaddr }) } } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index 431de50c0f192..1ed08cd671d4e 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -512,14 +512,10 @@ impl NetworkBehaviour for DiscoveryBehaviour { list_to_filter.extend(self.mdns.addresses_of_peer(peer_id)); if !self.allow_private_ipv4 { - list_to_filter.retain(|addr| { - if let Some(Protocol::Ip4(addr)) = addr.iter().next() { - if addr.is_private() { - return false - } - } - - true + list_to_filter.retain(|addr| match addr.iter().next() { + Some(Protocol::Ip4(addr)) if !IpNetwork::from(addr).is_global() => false, + Some(Protocol::Ip6(addr)) if !IpNetwork::from(addr).is_global() => false, + _ => true, }); } diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 51bc370265ef0..2f81ddfa1fb13 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -328,5 +328,5 @@ pub struct NetworkStatus { /// State sync in progress. pub state_sync: Option, /// Warp sync in progress. 
- pub warp_sync: Option, + pub warp_sync: Option>, } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index e22d96f32aeb8..a0c52d14fa62f 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -710,8 +710,7 @@ impl Protocol { match self.sync.on_state_data(&peer_id, response) { Ok(sync::OnStateData::Import(origin, block)) => CustomMessageOutcome::BlockImport(origin, vec![block]), - Ok(sync::OnStateData::Request(peer, req)) => - prepare_state_request::(&mut self.peers, peer, req), + Ok(sync::OnStateData::Continue) => CustomMessageOutcome::None, Err(sync::BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); @@ -728,10 +727,7 @@ impl Protocol { response: crate::warp_request_handler::EncodedProof, ) -> CustomMessageOutcome { match self.sync.on_warp_sync_data(&peer_id, response) { - Ok(sync::OnWarpSyncData::WarpProofRequest(peer, req)) => - prepare_warp_sync_request::(&mut self.peers, peer, req), - Ok(sync::OnWarpSyncData::StateRequest(peer, req)) => - prepare_state_request::(&mut self.peers, peer, req), + Ok(()) => CustomMessageOutcome::None, Err(sync::BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); self.peerset_handle.report_peer(id, repu); @@ -1106,7 +1102,7 @@ impl Protocol { /// Removes a `PeerId` from the list of reserved peers for syncing purposes. pub fn remove_reserved_peer(&self, peer: PeerId) { - self.peerset_handle.remove_reserved_peer(HARDCODED_PEERSETS_SYNC, peer.clone()); + self.peerset_handle.remove_reserved_peer(HARDCODED_PEERSETS_SYNC, peer); } /// Returns the list of reserved peers. @@ -1116,12 +1112,26 @@ impl Protocol { /// Adds a `PeerId` to the list of reserved peers for syncing purposes. 
pub fn add_reserved_peer(&self, peer: PeerId) { - self.peerset_handle.add_reserved_peer(HARDCODED_PEERSETS_SYNC, peer.clone()); + self.peerset_handle.add_reserved_peer(HARDCODED_PEERSETS_SYNC, peer); } /// Sets the list of reserved peers for syncing purposes. pub fn set_reserved_peers(&self, peers: HashSet) { - self.peerset_handle.set_reserved_peers(HARDCODED_PEERSETS_SYNC, peers.clone()); + self.peerset_handle.set_reserved_peers(HARDCODED_PEERSETS_SYNC, peers); + } + + /// Sets the list of reserved peers for the given protocol/peerset. + pub fn set_reserved_peerset_peers(&self, protocol: Cow<'static, str>, peers: HashSet) { + if let Some(index) = self.notification_protocols.iter().position(|p| *p == protocol) { + self.peerset_handle + .set_reserved_peers(sc_peerset::SetId::from(index + NUM_HARDCODED_PEERSETS), peers); + } else { + error!( + target: "sub-libp2p", + "set_reserved_peerset_peers with unknown protocol: {}", + protocol + ); + } } /// Removes a `PeerId` from the list of reserved peers. diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 8938c27aeddd8..001f6cbd7e455 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -143,10 +143,10 @@ pub struct RemoteReadResponse { /// Announcement summary used for debug logging. 
#[derive(Debug)] pub struct AnnouncementSummary { - block_hash: H::Hash, - number: H::Number, - parent_hash: H::Hash, - state: Option, + pub block_hash: H::Hash, + pub number: H::Number, + pub parent_hash: H::Hash, + pub state: Option, } impl generic::BlockAnnounce { diff --git a/client/network/src/protocol/notifications/behaviour.rs b/client/network/src/protocol/notifications/behaviour.rs index da2967d6f26eb..01138e3207570 100644 --- a/client/network/src/protocol/notifications/behaviour.rs +++ b/client/network/src/protocol/notifications/behaviour.rs @@ -417,7 +417,7 @@ impl Notifications { /// Returns true if we have an open substream to the given peer. pub fn is_open(&self, peer_id: &PeerId, set_id: sc_peerset::SetId) -> bool { - self.peers.get(&(peer_id.clone(), set_id)).map(|p| p.is_open()).unwrap_or(false) + self.peers.get(&(*peer_id, set_id)).map(|p| p.is_open()).unwrap_or(false) } /// Disconnects the given peer if we are connected to it. @@ -1777,7 +1777,7 @@ impl NetworkBehaviour for Notifications { "Handler({}, {:?}) => CloseResult({:?})", source, connection, set_id); - match self.peers.get_mut(&(source.clone(), set_id)) { + match self.peers.get_mut(&(source, set_id)) { // Move the connection from `Closing` to `Closed`. Some(PeerState::Incoming { connections, .. }) | Some(PeerState::DisabledPendingEnable { connections, .. }) | diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 07f5f76fce7f2..7f85c2b637826 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -182,6 +182,12 @@ impl Default for PendingRequests { } } +struct GapSync { + blocks: BlockCollection, + best_queued_number: NumberFor, + target: NumberFor, +} + /// The main data structure which contains all the state for a chains /// active syncing strategy. pub struct ChainSync { @@ -226,6 +232,8 @@ pub struct ChainSync { /// Enable importing existing blocks. 
This is used used after the state download to /// catch up to the latest state while re-importing blocks. import_existing: bool, + /// Gap download process. + gap_sync: Option>, } /// All the data we have about a Peer that we are trying to sync with @@ -298,6 +306,8 @@ pub enum PeerSyncState { DownloadingState, /// Downloading warp proof. DownloadingWarpProof, + /// Actively downloading block history after warp sync. + DownloadingGap(NumberFor), } impl PeerSyncState { @@ -326,7 +336,7 @@ pub struct StateDownloadProgress { /// Reported warp sync phase. #[derive(Clone, Eq, PartialEq, Debug)] -pub enum WarpSyncPhase { +pub enum WarpSyncPhase { /// Waiting for peers to connect. AwaitingPeers, /// Downloading and verifying grandpa warp proofs. @@ -335,24 +345,27 @@ pub enum WarpSyncPhase { DownloadingState, /// Importing state. ImportingState, + /// Downloading block history. + DownloadingBlocks(NumberFor), } -impl fmt::Display for WarpSyncPhase { +impl fmt::Display for WarpSyncPhase { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match self { Self::AwaitingPeers => write!(f, "Waiting for peers"), Self::DownloadingWarpProofs => write!(f, "Downloading finality proofs"), Self::DownloadingState => write!(f, "Downloading state"), Self::ImportingState => write!(f, "Importing state"), + Self::DownloadingBlocks(n) => write!(f, "Downloading block history (#{})", n), } } } /// Reported warp sync progress. #[derive(Clone, Eq, PartialEq, Debug)] -pub struct WarpSyncProgress { +pub struct WarpSyncProgress { /// Estimated download percentage. - pub phase: WarpSyncPhase, + pub phase: WarpSyncPhase, /// Total bytes downloaded so far. pub total_bytes: u64, } @@ -371,7 +384,7 @@ pub struct Status { /// State sync status in progress, if any. pub state_sync: Option, /// Warp sync in progress, if any. - pub warp_sync: Option, + pub warp_sync: Option>, } /// A peer did not behave as expected and should be reported. 
@@ -413,16 +426,7 @@ pub enum OnStateData { /// The block and state that should be imported. Import(BlockOrigin, IncomingBlock), /// A new state request needs to be made to the given peer. - Request(PeerId, StateRequest), -} - -/// Result of [`ChainSync::on_warp_sync_data`]. -#[derive(Debug)] -pub enum OnWarpSyncData { - /// Warp proof request is issued. - WarpProofRequest(PeerId, warp::WarpProofRequest), - /// A new state request needs to be made to the given peer. - StateRequest(PeerId, StateRequest), + Continue, } /// Result of [`ChainSync::poll_block_announce_validation`]. @@ -555,6 +559,7 @@ impl ChainSync { warp_sync: None, warp_sync_provider, import_existing: false, + gap_sync: None, }; sync.reset_sync_start_point()?; Ok(sync) @@ -608,10 +613,14 @@ impl ChainSync { SyncState::Idle }; - let warp_sync_progress = match (&self.warp_sync, &self.mode) { - (None, SyncMode::Warp) => + let warp_sync_progress = match (&self.warp_sync, &self.mode, &self.gap_sync) { + (_, _, Some(gap_sync)) => Some(WarpSyncProgress { + phase: WarpSyncPhase::DownloadingBlocks(gap_sync.best_queued_number), + total_bytes: 0, + }), + (None, SyncMode::Warp, _) => Some(WarpSyncProgress { phase: WarpSyncPhase::AwaitingPeers, total_bytes: 0 }), - (Some(sync), _) => Some(sync.progress()), + (Some(sync), _, _) => Some(sync.progress()), _ => None, }; @@ -686,17 +695,6 @@ impl ChainSync { return Ok(None) } - if let SyncMode::Warp = &self.mode { - if self.peers.len() >= MIN_PEERS_TO_START_WARP_SYNC && self.warp_sync.is_none() - { - log::debug!(target: "sync", "Starting warp state sync."); - if let Some(provider) = &self.warp_sync_provider { - self.warp_sync = - Some(WarpSync::new(self.client.clone(), provider.clone())); - } - } - } - // If we are at genesis, just start downloading. 
let (state, req) = if self.best_queued_number.is_zero() { debug!( @@ -739,6 +737,17 @@ impl ChainSync { }, ); + if let SyncMode::Warp = &self.mode { + if self.peers.len() >= MIN_PEERS_TO_START_WARP_SYNC && self.warp_sync.is_none() + { + log::debug!(target: "sync", "Starting warp state sync."); + if let Some(provider) = &self.warp_sync_provider { + self.warp_sync = + Some(WarpSync::new(self.client.clone(), provider.clone())); + } + } + } + Ok(req) }, Ok(BlockStatus::Queued) | @@ -869,10 +878,13 @@ impl ChainSync { /// Get an iterator over all block requests of all peers. pub fn block_requests(&mut self) -> impl Iterator)> + '_ { - if self.pending_requests.is_empty() || self.state_sync.is_some() || self.warp_sync.is_some() + if self.pending_requests.is_empty() || + self.state_sync.is_some() || + self.mode == SyncMode::Warp { return Either::Left(std::iter::empty()) } + if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { trace!(target: "sync", "Too many blocks in the queue."); return Either::Left(std::iter::empty()) @@ -888,6 +900,7 @@ impl ChainSync { let queue = &self.queue_blocks; let pending_requests = self.pending_requests.take(); let max_parallel = if major_sync { 1 } else { self.max_parallel_downloads }; + let gap_sync = &mut self.gap_sync; let iter = self.peers.iter_mut().filter_map(move |(id, peer)| { if !peer.state.is_available() || !pending_requests.contains(id) { return None @@ -947,6 +960,26 @@ impl ChainSync { trace!(target: "sync", "Downloading fork {:?} from {}", hash, id); peer.state = PeerSyncState::DownloadingStale(hash); Some((id, req)) + } else if let Some((range, req)) = gap_sync.as_mut().and_then(|sync| { + peer_gap_block_request( + id, + peer, + &mut sync.blocks, + attrs, + sync.target, + sync.best_queued_number, + ) + }) { + peer.state = PeerSyncState::DownloadingGap(range.start); + trace!( + target: "sync", + "New gap block request for {}, (best:{}, common:{}) {:?}", + id, + peer.best_number, + peer.common_number, + req, + ); + Some((id, req)) 
} else { None } @@ -966,9 +999,9 @@ impl ChainSync { } for (id, peer) in self.peers.iter_mut() { if peer.state.is_available() && peer.common_number >= sync.target_block_num() { - trace!(target: "sync", "New StateRequest for {}", id); peer.state = PeerSyncState::DownloadingState; let request = sync.next_request(); + trace!(target: "sync", "New StateRequest for {}: {:?}", id, request); return Some((*id, request)) } } @@ -982,7 +1015,7 @@ impl ChainSync { { for (id, peer) in self.peers.iter_mut() { if peer.state.is_available() && peer.best_number >= target { - trace!(target: "sync", "New StateRequest for {}", id); + trace!(target: "sync", "New StateRequest for {}: {:?}", id, request); peer.state = PeerSyncState::DownloadingState; return Some((*id, request)) } @@ -1039,6 +1072,7 @@ impl ChainSync { response: BlockResponse, ) -> Result, BadPeer> { self.downloaded_blocks += response.blocks.len(); + let mut gap = false; let new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(who) { let mut blocks = response.blocks; if request @@ -1061,6 +1095,43 @@ impl ChainSync { } self.drain_blocks() }, + PeerSyncState::DownloadingGap(start_block) => { + let start_block = *start_block; + peer.state = PeerSyncState::Available; + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_peer_download(who); + validate_blocks::(&blocks, who, Some(request))?; + gap_sync.blocks.insert(start_block, blocks, who.clone()); + gap = true; + gap_sync + .blocks + .drain(gap_sync.best_queued_number + One::one()) + .into_iter() + .map(|block_data| { + let justifications = block_data.block.justifications.or( + legacy_justification_mapping( + block_data.block.justification, + ), + ); + IncomingBlock { + hash: block_data.block.hash, + header: block_data.block.header, + body: block_data.block.body, + indexed_body: block_data.block.indexed_body, + justifications, + origin: block_data.origin, + allow_missing_state: true, + import_existing: self.import_existing, + skip_execution: true, + 
state: None, + } + }) + .collect() + } else { + debug!(target: "sync", "Unexpected gap block response from {}", who); + return Err(BadPeer(who.clone(), rep::NO_BLOCK)) + } + }, PeerSyncState::DownloadingStale(_) => { peer.state = PeerSyncState::Available; if blocks.is_empty() { @@ -1212,7 +1283,7 @@ impl ChainSync { return Err(BadPeer(*who, rep::NOT_REQUESTED)) }; - Ok(self.validate_and_queue_blocks(new_blocks)) + Ok(self.validate_and_queue_blocks(new_blocks, gap)) } /// Handle a response from the remote to a state request that we made. @@ -1223,6 +1294,11 @@ impl ChainSync { who: &PeerId, response: StateResponse, ) -> Result, BadPeer> { + if let Some(peer) = self.peers.get_mut(&who) { + if let PeerSyncState::DownloadingState = peer.state { + peer.state = PeerSyncState::Available; + } + } let import_result = if let Some(sync) = &mut self.state_sync { debug!( target: "sync", @@ -1261,11 +1337,10 @@ impl ChainSync { skip_execution: self.skip_execution(), state: Some(state), }; - debug!(target: "sync", "State sync is complete. Import is queued"); + debug!(target: "sync", "State download is complete. 
Import is queued"); Ok(OnStateData::Import(origin, block)) }, - state::ImportResult::Continue(request) => - Ok(OnStateData::Request(who.clone(), request)), + state::ImportResult::Continue => Ok(OnStateData::Continue), state::ImportResult::BadResponse => { debug!(target: "sync", "Bad state data received from {}", who); Err(BadPeer(*who, rep::BAD_BLOCK)) @@ -1280,7 +1355,12 @@ impl ChainSync { &mut self, who: &PeerId, response: warp::EncodedProof, - ) -> Result, BadPeer> { + ) -> Result<(), BadPeer> { + if let Some(peer) = self.peers.get_mut(&who) { + if let PeerSyncState::DownloadingWarpProof = peer.state { + peer.state = PeerSyncState::Available; + } + } let import_result = if let Some(sync) = &mut self.warp_sync { debug!( target: "sync", @@ -1295,10 +1375,7 @@ impl ChainSync { }; match import_result { - warp::WarpProofImportResult::StateRequest(request) => - Ok(OnWarpSyncData::StateRequest(*who, request)), - warp::WarpProofImportResult::WarpProofRequest(request) => - Ok(OnWarpSyncData::WarpProofRequest(*who, request)), + warp::WarpProofImportResult::Success => Ok(()), warp::WarpProofImportResult::BadResponse => { debug!(target: "sync", "Bad proof data received from {}", who); Err(BadPeer(*who, rep::BAD_BLOCK)) @@ -1309,6 +1386,7 @@ impl ChainSync { fn validate_and_queue_blocks( &mut self, mut new_blocks: Vec>, + gap: bool, ) -> OnBlockData { let orig_len = new_blocks.len(); new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); @@ -1320,7 +1398,7 @@ impl ChainSync { ); } - let origin = if self.status().state != SyncState::Downloading { + let origin = if !gap && self.status().state != SyncState::Downloading { BlockOrigin::NetworkBroadcast } else { BlockOrigin::NetworkInitialSync @@ -1494,6 +1572,15 @@ impl ChainSync { self.mode = SyncMode::Full; output.extend(self.restart()); } + let gap_sync_complete = + self.gap_sync.as_ref().map_or(false, |s| s.target == number); + if gap_sync_complete { + info!( + target: "sync", + "Block history download is complete." 
+ ); + self.gap_sync = None; + } }, Err(BlockImportError::IncompleteHeader(who)) => if let Some(peer) = who { @@ -1601,6 +1688,11 @@ impl ChainSync { if self.fork_targets.remove(&hash).is_some() { trace!(target: "sync", "Completed fork sync {:?}", hash); } + if let Some(gap_sync) = &mut self.gap_sync { + if number > gap_sync.best_queued_number && number <= gap_sync.target { + gap_sync.best_queued_number = number; + } + } if number > self.best_queued_number { self.best_queued_number = number; self.best_queued_hash = *hash; @@ -1954,6 +2046,9 @@ impl ChainSync { /// import, so this functions checks for such blocks and returns them. pub fn peer_disconnected(&mut self, who: &PeerId) -> Option> { self.blocks.clear_peer_download(who); + if let Some(gap_sync) = &mut self.gap_sync { + gap_sync.blocks.clear_peer_download(who) + } self.peers.remove(who); self.extra_justifications.peer_disconnected(who); self.pending_requests.set_all(); @@ -1963,7 +2058,7 @@ impl ChainSync { }); let blocks = self.drain_blocks(); if !blocks.is_empty() { - Some(self.validate_and_queue_blocks(blocks)) + Some(self.validate_and_queue_blocks(blocks, false)) } else { None } @@ -2043,6 +2138,14 @@ impl ChainSync { } } } + if let Some((start, end)) = info.block_gap { + debug!(target: "sync", "Starting gap sync #{} - #{}", start, end); + self.gap_sync = Some(GapSync { + best_queued_number: start - One::one(), + target: end, + blocks: BlockCollection::new(), + }); + } trace!(target: "sync", "Restarted sync at #{} ({:?})", self.best_queued_number, self.best_queued_hash); Ok(()) } @@ -2250,6 +2353,39 @@ fn peer_block_request( Some((range, request)) } +/// Get a new block request for the peer if any. 
+fn peer_gap_block_request( + id: &PeerId, + peer: &PeerSync, + blocks: &mut BlockCollection, + attrs: message::BlockAttributes, + target: NumberFor, + common_number: NumberFor, +) -> Option<(Range>, BlockRequest)> { + let range = blocks.needed_blocks( + id.clone(), + MAX_BLOCKS_TO_REQUEST, + std::cmp::min(peer.best_number, target), + common_number, + 1, + MAX_DOWNLOAD_AHEAD, + )?; + + // The end is not part of the range. + let last = range.end.saturating_sub(One::one()); + let from = message::FromBlock::Number(last); + + let request = message::generic::BlockRequest { + id: 0, + fields: attrs.clone(), + from, + to: None, + direction: message::Direction::Descending, + max: Some((range.end - range.start).saturated_into::()), + }; + Some((range, request)) +} + /// Get pending fork sync targets for a peer. fn fork_sync_request( id: &PeerId, diff --git a/client/network/src/protocol/sync/state.rs b/client/network/src/protocol/sync/state.rs index d2e4463f98912..e644ba1013e4d 100644 --- a/client/network/src/protocol/sync/state.rs +++ b/client/network/src/protocol/sync/state.rs @@ -47,8 +47,8 @@ pub struct StateSync { pub enum ImportResult { /// State is complete and ready for import. Import(B::Hash, B::Header, ImportedState), - /// Continue dowloading. - Continue(StateRequest), + /// Continue downloading. + Continue, /// Bad state chunk. BadResponse, } @@ -134,7 +134,7 @@ impl StateSync { ImportedState { block: self.target_block, state: std::mem::take(&mut self.state) }, ) } else { - ImportResult::Continue(self.next_request()) + ImportResult::Continue } } diff --git a/client/network/src/protocol/sync/warp.rs b/client/network/src/protocol/sync/warp.rs index 32bd5cb9ed798..bbf8a28da14a6 100644 --- a/client/network/src/protocol/sync/warp.rs +++ b/client/network/src/protocol/sync/warp.rs @@ -37,11 +37,9 @@ enum Phase { } /// Import warp proof result. -pub enum WarpProofImportResult { - /// Start downloading state data. 
- StateRequest(StateRequest), - /// Continue dowloading warp sync proofs. - WarpProofRequest(WarpProofRequest), +pub enum WarpProofImportResult { + /// Import was successful. + Success, /// Bad proof. BadResponse, } @@ -69,7 +67,7 @@ impl WarpSync { Self { client, warp_sync_provider, phase, total_proof_bytes: 0 } } - /// Validate and import a state reponse. + /// Validate and import a state response. pub fn import_state(&mut self, response: StateResponse) -> ImportResult { match &mut self.phase { Phase::WarpProof { .. } => { @@ -80,19 +78,15 @@ impl WarpSync { } } - /// Validate and import a warp proof reponse. - pub fn import_warp_proof(&mut self, response: EncodedProof) -> WarpProofImportResult { + /// Validate and import a warp proof response. + pub fn import_warp_proof(&mut self, response: EncodedProof) -> WarpProofImportResult { match &mut self.phase { Phase::State(_) => { log::debug!(target: "sync", "Unexpected warp proof response"); WarpProofImportResult::BadResponse }, Phase::WarpProof { set_id, authorities, last_hash } => { - match self.warp_sync_provider.verify( - &response, - *set_id, - std::mem::take(authorities), - ) { + match self.warp_sync_provider.verify(&response, *set_id, authorities.clone()) { Err(e) => { log::debug!(target: "sync", "Bad warp proof response: {:?}", e); return WarpProofImportResult::BadResponse @@ -103,17 +97,14 @@ impl WarpSync { *authorities = new_authorities; *last_hash = new_last_hash.clone(); self.total_proof_bytes += response.0.len() as u64; - WarpProofImportResult::WarpProofRequest(WarpProofRequest { - begin: new_last_hash, - }) + WarpProofImportResult::Success }, Ok(VerificationResult::Complete(new_set_id, _, header)) => { log::debug!(target: "sync", "Verified complete proof, set_id={:?}", new_set_id); self.total_proof_bytes += response.0.len() as u64; let state_sync = StateSync::new(self.client.clone(), header, false); - let request = state_sync.next_request(); self.phase = Phase::State(state_sync); - 
WarpProofImportResult::StateRequest(request) + WarpProofImportResult::Success }, } }, @@ -161,7 +152,7 @@ impl WarpSync { } /// Returns state sync estimated progress (percentage, bytes) - pub fn progress(&self) -> WarpSyncProgress { + pub fn progress(&self) -> WarpSyncProgress { match &self.phase { Phase::WarpProof { .. } => WarpSyncProgress { phase: WarpSyncPhase::DownloadingWarpProofs, diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 23f9c614d9069..9b6e54f37a663 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -1092,61 +1092,86 @@ impl NetworkService { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RemoveReserved(peer_id)); } - /// Add peers to a peer set. + /// Sets the reserved set of a protocol to the given set of peers. /// /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also /// consist of only `/p2p/`. /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). - pub fn add_peers_to_reserved_set( + pub fn set_reserved_peers( &self, protocol: Cow<'static, str>, peers: HashSet, ) -> Result<(), String> { - let peers = self.split_multiaddr_and_peer_id(peers)?; + let peers_addrs = self.split_multiaddr_and_peer_id(peers)?; - for (peer_id, addr) in peers.into_iter() { + let mut peers: HashSet = HashSet::with_capacity(peers_addrs.len()); + + for (peer_id, addr) in peers_addrs.into_iter() { // Make sure the local peer ID is never added to the PSM. 
if peer_id == self.local_peer_id { return Err("Local peer ID cannot be added as a reserved peer.".to_string()) } + peers.insert(peer_id); + if !addr.is_empty() { let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr)); + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); } - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::AddSetReserved(protocol.clone(), peer_id)); } + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SetPeersetReserved(protocol, peers)); + Ok(()) } - /// Remove peers from a peer set. + /// Add peers to a peer set. /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. + /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. It can also + /// consist of only `/p2p/`. /// /// Returns an `Err` if one of the given addresses is invalid or contains an /// invalid peer ID (which includes the local peer ID). - // NOTE: technically, this function only needs `Vec`, but we use `Multiaddr` here for - // convenience. - pub fn remove_peers_from_reserved_set( + pub fn add_peers_to_reserved_set( &self, protocol: Cow<'static, str>, peers: HashSet, ) -> Result<(), String> { let peers = self.split_multiaddr_and_peer_id(peers)?; - for (peer_id, _) in peers.into_iter() { + + for (peer_id, addr) in peers.into_iter() { + // Make sure the local peer ID is never added to the PSM. + if peer_id == self.local_peer_id { + return Err("Local peer ID cannot be added as a reserved peer.".to_string()) + } + + if !addr.is_empty() { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id.clone(), addr)); + } let _ = self .to_worker - .unbounded_send(ServiceToWorkerMsg::RemoveSetReserved(protocol.clone(), peer_id)); + .unbounded_send(ServiceToWorkerMsg::AddSetReserved(protocol.clone(), peer_id)); } + Ok(()) } + /// Remove peers from a peer set. 
+ pub fn remove_peers_from_reserved_set(&self, protocol: Cow<'static, str>, peers: Vec) { + for peer_id in peers.into_iter() { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::RemoveSetReserved(protocol.clone(), peer_id)); + } + } + /// Configure an explicit fork sync request. /// Note that this function should not be used for recent blocks. /// Sync should be able to download all the recent forks normally. @@ -1195,25 +1220,12 @@ impl NetworkService { /// Remove peers from a peer set. /// /// If we currently have an open substream with this peer, it will soon be closed. - /// - /// Each `Multiaddr` must end with a `/p2p/` component containing the `PeerId`. - /// - /// Returns an `Err` if one of the given addresses is invalid or contains an - /// invalid peer ID (which includes the local peer ID). - // NOTE: technically, this function only needs `Vec`, but we use `Multiaddr` here for - // convenience. - pub fn remove_from_peers_set( - &self, - protocol: Cow<'static, str>, - peers: HashSet, - ) -> Result<(), String> { - let peers = self.split_multiaddr_and_peer_id(peers)?; - for (peer_id, _) in peers.into_iter() { + pub fn remove_from_peers_set(&self, protocol: Cow<'static, str>, peers: Vec) { + for peer_id in peers.into_iter() { let _ = self .to_worker .unbounded_send(ServiceToWorkerMsg::RemoveFromPeersSet(protocol.clone(), peer_id)); } - Ok(()) } /// Returns the number of peers we're connected to. 
@@ -1400,6 +1412,7 @@ enum ServiceToWorkerMsg { AddReserved(PeerId), RemoveReserved(PeerId), SetReserved(HashSet), + SetPeersetReserved(Cow<'static, str>, HashSet), AddSetReserved(Cow<'static, str>, PeerId), RemoveSetReserved(Cow<'static, str>, PeerId), AddToPeersSet(Cow<'static, str>, PeerId), @@ -1541,6 +1554,11 @@ impl Future for NetworkWorker { .behaviour_mut() .user_protocol_mut() .set_reserved_peers(peers), + ServiceToWorkerMsg::SetPeersetReserved(protocol, peers) => this + .network_service + .behaviour_mut() + .user_protocol_mut() + .set_reserved_peerset_peers(protocol, peers), ServiceToWorkerMsg::AddReserved(peer_id) => this .network_service .behaviour_mut() @@ -2005,8 +2023,9 @@ impl Future for NetworkWorker { .inc(); } }, - Poll::Ready(SwarmEvent::UnknownPeerUnreachableAddr { address, error }) => - trace!(target: "sub-libp2p", "Libp2p => UnknownPeerUnreachableAddr({}): {}", address, error), + Poll::Ready(SwarmEvent::UnknownPeerUnreachableAddr { address, error }) => { + trace!(target: "sub-libp2p", "Libp2p => UnknownPeerUnreachableAddr({}): {}", address, error) + }, Poll::Ready(SwarmEvent::ListenerClosed { reason, addresses, .. 
}) => { if let Some(metrics) = this.metrics.as_ref() { metrics.listeners_local_addresses.sub(addresses.len() as u64); diff --git a/client/network/src/state_request_handler.rs b/client/network/src/state_request_handler.rs index b4e5320ebfda8..d2e58ce955197 100644 --- a/client/network/src/state_request_handler.rs +++ b/client/network/src/state_request_handler.rs @@ -78,6 +78,7 @@ struct SeenRequestsKey { start: Vec, } +#[allow(clippy::derive_hash_xor_eq)] impl Hash for SeenRequestsKey { fn hash(&self, state: &mut H) { self.peer.hash(state); diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs index 82e7e8fe1714c..99350f603a375 100644 --- a/client/network/src/transactions.rs +++ b/client/network/src/transactions.rs @@ -92,7 +92,7 @@ struct Metrics { impl Metrics { fn register(r: &Registry) -> Result { - Ok(Metrics { + Ok(Self { propagated_transactions: register( Counter::new( "sync_propagated_transactions", @@ -133,7 +133,7 @@ pub struct TransactionsHandlerPrototype { impl TransactionsHandlerPrototype { /// Create a new instance. pub fn new(protocol_id: ProtocolId) -> Self { - TransactionsHandlerPrototype { + Self { protocol_name: Cow::from({ let mut proto = String::new(); proto.push_str("/"); @@ -317,15 +317,10 @@ impl TransactionsHandler { } }, Event::SyncDisconnected { remote } => { - let addr = iter::once(multiaddr::Protocol::P2p(remote.into())) - .collect::(); - let result = self.service.remove_peers_from_reserved_set( + self.service.remove_peers_from_reserved_set( self.protocol_name.clone(), - iter::once(addr).collect(), + iter::once(remote).collect(), ); - if let Err(err) = result { - log::error!(target: "sync", "Removing reserved peer failed: {}", err); - } }, Event::NotificationStreamOpened { remote, protocol, role, .. 
} @@ -401,7 +396,7 @@ impl TransactionsHandler { let hash = self.transaction_pool.hash_of(&t); peer.known_transactions.insert(hash.clone()); - self.service.report_peer(who.clone(), rep::ANY_TRANSACTION); + self.service.report_peer(who, rep::ANY_TRANSACTION); match self.pending_transactions_peers.entry(hash.clone()) { Entry::Vacant(entry) => { @@ -409,10 +404,10 @@ impl TransactionsHandler { validation: self.transaction_pool.import(t), tx_hash: hash, }); - entry.insert(vec![who.clone()]); + entry.insert(vec![who]); }, Entry::Occupied(mut entry) => { - entry.get_mut().push(who.clone()); + entry.get_mut().push(who); }, } } @@ -468,11 +463,8 @@ impl TransactionsHandler { propagated_to.entry(hash).or_default().push(who.to_base58()); } trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); - self.service.write_notification( - who.clone(), - self.protocol_name.clone(), - to_send.encode(), - ); + self.service + .write_notification(*who, self.protocol_name.clone(), to_send.encode()); } } diff --git a/client/network/src/warp_request_handler.rs b/client/network/src/warp_request_handler.rs index 2ab95bb3853ba..ca5a93b752b62 100644 --- a/client/network/src/warp_request_handler.rs +++ b/client/network/src/warp_request_handler.rs @@ -23,10 +23,11 @@ use futures::{ stream::StreamExt, }; use log::debug; -use sp_finality_grandpa::{AuthorityList, SetId}; use sp_runtime::traits::Block as BlockT; use std::{sync::Arc, time::Duration}; +pub use sp_finality_grandpa::{AuthorityList, SetId}; + /// Scale-encoded warp sync proof response. pub struct EncodedProof(pub Vec); @@ -55,7 +56,7 @@ pub trait WarpSyncProvider: Send + Sync { &self, start: B::Hash, ) -> Result>; - /// Verify warp proof agains current set of authorities. + /// Verify warp proof against current set of authorities. 
fn verify( &self, proof: &EncodedProof, diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 88399ca54a436..13555952cffd5 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -6,14 +6,14 @@ license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" publish = false -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-std = "1.6.5" +async-std = "1.10.0" sc-network = { version = "0.10.0-dev", path = "../" } log = "0.4.8" parking_lot = "0.11.1" diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index bb49cef8c642c..fb0012aaf5baf 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -28,8 +28,10 @@ use std::{ pin::Pin, sync::Arc, task::{Context as FutureContext, Poll}, + time::Duration, }; +use async_std::future::timeout; use futures::{future::BoxFuture, prelude::*}; use libp2p::{build_multiaddr, PeerId}; use log::trace; @@ -53,7 +55,7 @@ use sc_network::{ }, light_client_requests::{self, handler::LightClientRequestHandler}, state_request_handler::{self, StateRequestHandler}, - Multiaddr, NetworkService, NetworkWorker, + warp_request_handler, Multiaddr, NetworkService, NetworkWorker, }; use sc_service::client::Client; use sp_blockchain::{ @@ -66,6 +68,7 @@ use sp_consensus::{ }; use sp_core::H256; use sp_runtime::{ + codec::{Decode, Encode}, generic::{BlockId, OpaqueDigestItemId}, traits::{Block as BlockT, Header as HeaderT, NumberFor}, Justification, Justifications, @@ -650,6 +653,33 @@ impl VerifierAdapter { } } +struct TestWarpSyncProvider(Arc>); + +impl warp_request_handler::WarpSyncProvider for TestWarpSyncProvider { + fn generate( + &self, + _start: B::Hash, + ) -> Result> { + let info = self.0.info(); + let best_header = 
self.0.header(BlockId::hash(info.best_hash)).unwrap().unwrap(); + Ok(warp_request_handler::EncodedProof(best_header.encode())) + } + fn verify( + &self, + proof: &warp_request_handler::EncodedProof, + _set_id: warp_request_handler::SetId, + _authorities: warp_request_handler::AuthorityList, + ) -> Result, Box> + { + let warp_request_handler::EncodedProof(encoded) = proof; + let header = B::Header::decode(&mut encoded.as_slice()).unwrap(); + Ok(warp_request_handler::VerificationResult::Complete(0, Default::default(), header)) + } + fn current_authorities(&self) -> warp_request_handler::AuthorityList { + Default::default() + } +} + /// Configuration for a full peer. #[derive(Default)] pub struct FullPeerConfig { @@ -735,7 +765,7 @@ where (Some(keep_blocks), false) => TestClientBuilder::with_pruning_window(keep_blocks), (None, false) => TestClientBuilder::with_default_backend(), }; - if matches!(config.sync_mode, SyncMode::Fast { .. }) { + if matches!(config.sync_mode, SyncMode::Fast { .. } | SyncMode::Warp) { test_client_builder = test_client_builder.set_no_genesis(); } let backend = test_client_builder.backend(); @@ -814,6 +844,15 @@ where protocol_config }; + let warp_sync = Arc::new(TestWarpSyncProvider(client.clone())); + + let warp_protocol_config = { + let (handler, protocol_config) = + warp_request_handler::RequestHandler::new(protocol_id.clone(), warp_sync.clone()); + self.spawn_task(handler.run().boxed()); + protocol_config + }; + let network = NetworkWorker::new(sc_network::config::Params { role: if config.is_authority { Role::Authority } else { Role::Full }, executor: None, @@ -833,7 +872,7 @@ where block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, - warp_sync: None, + warp_sync: Some((warp_sync, warp_protocol_config)), }) .unwrap(); @@ -1017,10 +1056,13 @@ where /// Blocks the current thread until we are sync'ed. /// /// Calls `poll_until_sync` repeatedly. 
+ /// (If we've not synced within 10 mins then panic rather than hang.) fn block_until_sync(&mut self) { - futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| { - self.poll_until_sync(cx) - })); + futures::executor::block_on(timeout( + Duration::from_secs(10 * 60), + futures::future::poll_fn::<(), _>(|cx| self.poll_until_sync(cx)), + )) + .expect("sync didn't happen within 10 mins"); } /// Blocks the current thread until there are no pending packets. diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index c86ccfeac3ed1..f3af7f8ff6fc3 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -616,9 +616,10 @@ fn syncs_header_only_forks() { let small_hash = net.peer(0).client().info().best_hash; net.peer(1).push_blocks(4, false); - net.block_until_sync(); // Peer 1 will sync the small fork even though common block state is missing - assert!(net.peer(1).has_block(&small_hash)); + while !net.peer(1).has_block(&small_hash) { + net.block_until_idle(); + } } #[test] @@ -855,12 +856,19 @@ fn sync_to_tip_requires_that_sync_protocol_is_informed_about_best_block() { net.block_until_idle(); // Connect another node that should now sync to the tip - net.add_full_peer_with_config(Default::default()); - net.block_until_connected(); + net.add_full_peer_with_config(FullPeerConfig { + connect_to_peers: Some(vec![0]), + ..Default::default() + }); - while !net.peer(2).has_block(&block_hash) { - net.block_until_idle(); - } + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(2).has_block(&block_hash) { + Poll::Ready(()) + } else { + Poll::Pending + } + })); // However peer 1 should still not have the block. assert!(!net.peer(1).has_block(&block_hash)); @@ -1194,6 +1202,38 @@ fn syncs_indexed_blocks() { .is_some()); } +#[test] +fn warp_sync() { + sp_tracing::try_init_simple(); + let mut net = TestNet::new(0); + // Create 3 synced peers and 1 peer trying to warp sync. 
+ net.add_full_peer_with_config(Default::default()); + net.add_full_peer_with_config(Default::default()); + net.add_full_peer_with_config(Default::default()); + net.add_full_peer_with_config(FullPeerConfig { + sync_mode: SyncMode::Warp, + ..Default::default() + }); + let gap_end = net.peer(0).push_blocks(63, false); + net.peer(0).push_blocks(1, false); + net.peer(1).push_blocks(64, false); + net.peer(2).push_blocks(64, false); + // Wait for peer 3 to sync state. + net.block_until_sync(); + assert!(!net.peer(3).client().has_state_at(&BlockId::Number(1))); + assert!(net.peer(3).client().has_state_at(&BlockId::Number(64))); + + // Wait for peer 3 to download block history + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(3).has_block(&gap_end) { + Poll::Ready(()) + } else { + Poll::Pending + } + })); +} + #[test] fn syncs_huge_blocks() { use sp_core::storage::well_known_keys::HEAP_PAGES; diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 641a1e55063d0..146ce07e1303b 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -5,7 +5,7 @@ version = "4.0.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index ce9fb298d1b0c..31f7d60e34ff9 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -700,7 +700,7 @@ mod tests { use super::{http, SharedClient}; use crate::api::timestamp; use core::convert::Infallible; - use futures::future; + use futures::{future, StreamExt}; use lazy_static::lazy_static; use sp_core::offchain::{Duration, HttpError, HttpRequestId, HttpRequestStatus}; @@ -725,7 +725,11 @@ mod tests { let server = hyper::Server::bind(&"127.0.0.1:0".parse().unwrap()).serve( 
hyper::service::make_service_fn(|_| async move { Ok::<_, Infallible>(hyper::service::service_fn( - move |_req| async move { + move |req: hyper::Request| async move { + // Wait until the complete request was received and processed, + // otherwise the tests are flaky. + let _ = req.into_body().collect::>().await; + Ok::<_, Infallible>(hyper::Response::new(hyper::Body::from( "Hello World!", ))) diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 5962620d6e06e..a7e9130cfff1c 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] - [dependencies] futures = "0.3.9" libp2p = { version = "0.39.1", default-features = false } diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 0775354befee4..7fbda1ba7b7f8 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -79,13 +79,13 @@ pub struct SetId(usize); impl SetId { pub const fn from(id: usize) -> Self { - SetId(id) + Self(id) } } impl From for SetId { fn from(id: usize) -> Self { - SetId(id) + Self(id) } } @@ -107,12 +107,12 @@ pub struct ReputationChange { impl ReputationChange { /// New reputation change with given delta and reason. pub const fn new(value: i32, reason: &'static str) -> ReputationChange { - ReputationChange { value, reason } + Self { value, reason } } /// New reputation change that forces minimum possible reputation. pub const fn new_fatal(reason: &'static str) -> ReputationChange { - ReputationChange { value: i32::MIN, reason } + Self { value: i32::MIN, reason } } } @@ -208,8 +208,8 @@ pub enum Message { pub struct IncomingIndex(pub u64); impl From for IncomingIndex { - fn from(val: u64) -> IncomingIndex { - IncomingIndex(val) + fn from(val: u64) -> Self { + Self(val) } } @@ -274,7 +274,7 @@ pub struct Peerset { impl Peerset { /// Builds a new peerset from the given configuration. 
- pub fn from_config(config: PeersetConfig) -> (Peerset, PeersetHandle) { + pub fn from_config(config: PeersetConfig) -> (Self, PeersetHandle) { let (tx, rx) = tracing_unbounded("mpsc_peerset_messages"); let handle = PeersetHandle { tx: tx.clone() }; @@ -282,7 +282,7 @@ impl Peerset { let mut peerset = { let now = Instant::now(); - Peerset { + Self { data: peersstate::PeersState::new(config.sets.iter().map(|set| { peersstate::SetConfig { in_peers: set.in_peers, out_peers: set.out_peers } })), @@ -322,7 +322,7 @@ impl Peerset { } fn on_add_reserved_peer(&mut self, set_id: SetId, peer_id: PeerId) { - let newly_inserted = self.reserved_nodes[set_id.0].0.insert(peer_id.clone()); + let newly_inserted = self.reserved_nodes[set_id.0].0.insert(peer_id); if !newly_inserted { return } @@ -422,8 +422,7 @@ impl Peerset { match self.data.peer(set_id.0, &peer_id) { peersstate::Peer::Connected(peer) => { - self.message_queue - .push_back(Message::Drop { set_id, peer_id: peer.peer_id().clone() }); + self.message_queue.push_back(Message::Drop { set_id, peer_id: *peer.peer_id() }); peer.disconnect().forget_peer(); }, peersstate::Peer::NotConnected(peer) => { @@ -819,8 +818,8 @@ mod tests { }; let (peerset, handle) = Peerset::from_config(config); - handle.add_reserved_peer(SetId::from(0), reserved_peer.clone()); - handle.add_reserved_peer(SetId::from(0), reserved_peer2.clone()); + handle.add_reserved_peer(SetId::from(0), reserved_peer); + handle.add_reserved_peer(SetId::from(0), reserved_peer2); assert_messages( peerset, @@ -845,22 +844,22 @@ mod tests { sets: vec![SetConfig { in_peers: 2, out_peers: 1, - bootnodes: vec![bootnode.clone()], + bootnodes: vec![bootnode], reserved_nodes: Default::default(), reserved_only: false, }], }; let (mut peerset, _handle) = Peerset::from_config(config); - peerset.incoming(SetId::from(0), incoming.clone(), ii); - peerset.incoming(SetId::from(0), incoming.clone(), ii4); - peerset.incoming(SetId::from(0), incoming2.clone(), ii2); - 
peerset.incoming(SetId::from(0), incoming3.clone(), ii3); + peerset.incoming(SetId::from(0), incoming, ii); + peerset.incoming(SetId::from(0), incoming, ii4); + peerset.incoming(SetId::from(0), incoming2, ii2); + peerset.incoming(SetId::from(0), incoming3, ii3); assert_messages( peerset, vec![ - Message::Connect { set_id: SetId::from(0), peer_id: bootnode.clone() }, + Message::Connect { set_id: SetId::from(0), peer_id: bootnode }, Message::Accept(ii), Message::Accept(ii2), Message::Reject(ii3), @@ -883,7 +882,7 @@ mod tests { }; let (mut peerset, _) = Peerset::from_config(config); - peerset.incoming(SetId::from(0), incoming.clone(), ii); + peerset.incoming(SetId::from(0), incoming, ii); assert_messages(peerset, vec![Message::Reject(ii)]); } @@ -897,15 +896,15 @@ mod tests { sets: vec![SetConfig { in_peers: 0, out_peers: 2, - bootnodes: vec![bootnode.clone()], + bootnodes: vec![bootnode], reserved_nodes: Default::default(), reserved_only: false, }], }; let (mut peerset, _handle) = Peerset::from_config(config); - peerset.add_to_peers_set(SetId::from(0), discovered.clone()); - peerset.add_to_peers_set(SetId::from(0), discovered.clone()); + peerset.add_to_peers_set(SetId::from(0), discovered); + peerset.add_to_peers_set(SetId::from(0), discovered); peerset.add_to_peers_set(SetId::from(0), discovered2); assert_messages( @@ -931,7 +930,7 @@ mod tests { // We ban a node by setting its reputation under the threshold. let peer_id = PeerId::random(); - handle.report_peer(peer_id.clone(), ReputationChange::new(BANNED_THRESHOLD - 1, "")); + handle.report_peer(peer_id, ReputationChange::new(BANNED_THRESHOLD - 1, "")); let fut = futures::future::poll_fn(move |cx| { // We need one polling for the message to be processed. @@ -974,7 +973,7 @@ mod tests { // We ban a node by setting its reputation under the threshold. 
let peer_id = PeerId::random(); - handle.report_peer(peer_id.clone(), ReputationChange::new(BANNED_THRESHOLD - 1, "")); + handle.report_peer(peer_id, ReputationChange::new(BANNED_THRESHOLD - 1, "")); let fut = futures::future::poll_fn(move |cx| { // We need one polling for the message to be processed. diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index 7717620eae3a7..d7a9ef9135876 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ -105,8 +105,8 @@ struct Node { } impl Node { - fn new(num_sets: usize) -> Node { - Node { sets: (0..num_sets).map(|_| MembershipState::NotMember).collect(), reputation: 0 } + fn new(num_sets: usize) -> Self { + Self { sets: (0..num_sets).map(|_| MembershipState::NotMember).collect(), reputation: 0 } } } @@ -128,21 +128,24 @@ enum MembershipState { } impl MembershipState { - /// Returns `true` for `In` and `Out`. + /// Returns `true` for [`MembershipState::In`] and [`MembershipState::Out`]. fn is_connected(self) -> bool { match self { - MembershipState::NotMember => false, - MembershipState::In => true, - MembershipState::Out => true, - MembershipState::NotConnected { .. } => false, + Self::In | Self::Out => true, + Self::NotMember | Self::NotConnected { .. } => false, } } + + /// Returns `true` for [`MembershipState::NotConnected`]. + fn is_not_connected(self) -> bool { + matches!(self, Self::NotConnected { .. }) + } } impl PeersState { - /// Builds a new empty `PeersState`. + /// Builds a new empty [`PeersState`]. pub fn new(sets: impl IntoIterator) -> Self { - PeersState { + Self { nodes: HashMap::new(), sets: sets .into_iter() @@ -242,12 +245,7 @@ impl PeersState { let outcome = self .nodes .iter_mut() - .filter(|(_, Node { sets, .. })| match sets[set] { - MembershipState::NotMember => false, - MembershipState::In => false, - MembershipState::Out => false, - MembershipState::NotConnected { .. } => true, - }) + .filter(|(_, Node { sets, .. 
})| sets[set].is_not_connected()) .fold(None::<(&PeerId, &mut Node)>, |mut cur_node, to_try| { if let Some(cur_node) = cur_node.take() { if cur_node.1.reputation >= to_try.1.reputation { @@ -318,35 +316,32 @@ pub enum Peer<'a> { } impl<'a> Peer<'a> { - /// If we are the `Connected` variant, returns the inner `ConnectedPeer`. Returns `None` + /// If we are the `Connected` variant, returns the inner [`ConnectedPeer`]. Returns `None` /// otherwise. pub fn into_connected(self) -> Option> { match self { - Peer::Connected(peer) => Some(peer), - Peer::NotConnected(_) => None, - Peer::Unknown(_) => None, + Self::Connected(peer) => Some(peer), + Self::NotConnected(..) | Self::Unknown(..) => None, } } - /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` + /// If we are the `NotConnected` variant, returns the inner [`NotConnectedPeer`]. Returns `None` /// otherwise. #[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn into_not_connected(self) -> Option> { match self { - Peer::Connected(_) => None, - Peer::NotConnected(peer) => Some(peer), - Peer::Unknown(_) => None, + Self::NotConnected(peer) => Some(peer), + Self::Connected(..) | Self::Unknown(..) => None, } } - /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` + /// If we are the `Unknown` variant, returns the inner [`UnknownPeer`]. Returns `None` /// otherwise. #[cfg(test)] // Feel free to remove this if this function is needed outside of tests pub fn into_unknown(self) -> Option> { match self { - Peer::Connected(_) => None, - Peer::NotConnected(_) => None, - Peer::Unknown(peer) => Some(peer), + Self::Unknown(peer) => Some(peer), + Self::Connected(..) | Self::NotConnected(..) => None, } } } @@ -473,7 +468,7 @@ impl<'a> NotConnectedPeer<'a> { /// the slots are full, the node stays "not connected" and we return `Err`. /// /// Non-slot-occupying nodes don't count towards the number of slots. 
- pub fn try_outgoing(self) -> Result, NotConnectedPeer<'a>> { + pub fn try_outgoing(self) -> Result, Self> { let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); // Note that it is possible for num_out to be strictly superior to the max, in case we were @@ -500,7 +495,7 @@ impl<'a> NotConnectedPeer<'a> { /// the slots are full, the node stays "not connected" and we return `Err`. /// /// Non-slot-occupying nodes don't count towards the number of slots. - pub fn try_accept_incoming(self) -> Result, NotConnectedPeer<'a>> { + pub fn try_accept_incoming(self) -> Result, Self> { let is_no_slot_occupy = self.state.sets[self.set].no_slot_nodes.contains(&*self.peer_id); // Note that it is possible for num_in to be strictly superior to the max, in case we were diff --git a/client/proposer-metrics/Cargo.toml b/client/proposer-metrics/Cargo.toml index ffe5045461f77..296329a5fda77 100644 --- a/client/proposer-metrics/Cargo.toml +++ b/client/proposer-metrics/Cargo.toml @@ -4,7 +4,7 @@ version = "0.9.0" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Basic metrics for block production." readme = "README.md" diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 6342abb1a3c41..192cf02e7ce17 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate RPC interfaces." 
readme = "README.md" diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index 620a000c500f4..1cc191197232b 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -301,7 +301,7 @@ pub trait StateApi { /// [substrate storage][1], [transparent keys in substrate][2], /// [querying substrate storage via rpc][3]. /// - /// [1]: https://substrate.dev/docs/en/knowledgebase/advanced/storage#storage-map-key + /// [1]: https://docs.substrate.io/v3/advanced/storage#storage-map-keys /// [2]: https://www.shawntabrizi.com/substrate/transparent-keys-in-substrate/ /// [3]: https://www.shawntabrizi.com/substrate/querying-substrate-storage-via-rpc/ /// diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index 26a05a8263dc4..cbbea00d6f576 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate RPC servers." readme = "README.md" diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 65ed6a914b19a..1ac409d6ba89f 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -33,6 +33,9 @@ const MEGABYTE: usize = 1024 * 1024; /// Maximal payload accepted by RPC servers. pub const RPC_MAX_PAYLOAD_DEFAULT: usize = 15 * MEGABYTE; +/// Maximal buffer size in WS server. +pub const WS_MAX_BUFFER_CAPACITY_DEFAULT: usize = 16 * MEGABYTE; + /// Default maximum number of connections for WS RPC servers. 
const WS_MAX_CONNECTIONS: usize = 100; @@ -172,18 +175,32 @@ pub fn start_ws< cors: Option<&Vec>, io: RpcHandler, maybe_max_payload_mb: Option, + maybe_max_out_buffer_capacity_mb: Option, server_metrics: ServerMetrics, tokio_handle: tokio::runtime::Handle, ) -> io::Result { - let rpc_max_payload = maybe_max_payload_mb + let max_payload = maybe_max_payload_mb .map(|mb| mb.saturating_mul(MEGABYTE)) .unwrap_or(RPC_MAX_PAYLOAD_DEFAULT); + let max_out_buffer_capacity = maybe_max_out_buffer_capacity_mb + .map(|mb| mb.saturating_mul(MEGABYTE)) + .unwrap_or(WS_MAX_BUFFER_CAPACITY_DEFAULT); + + if max_payload > max_out_buffer_capacity { + log::warn!( + "maximum payload ({}) is more than maximum output buffer ({}) size in ws server, the payload will actually be limited by the buffer size", + max_payload, + max_out_buffer_capacity, + ) + } + ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| { context.sender().into() }) .event_loop_executor(tokio_handle) - .max_payload(rpc_max_payload) + .max_payload(max_payload) .max_connections(max_connections.unwrap_or(WS_MAX_CONNECTIONS)) + .max_out_buffer_capacity(max_out_buffer_capacity) .allowed_origins(map_cors(cors)) .allowed_hosts(hosts_filtering(cors.is_some())) .session_stats(server_metrics) diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 427800f74ddf2..f5c7f99ff7435 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate Client RPC" readme = "README.md" diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 14997545031df..c64449ff13aa2 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -308,7 +308,10 @@ fn 
test_add_reset_log_filter() { // Enter log generation / filter reload if std::env::var("TEST_LOG_FILTER").is_ok() { - sc_tracing::logging::LoggerBuilder::new("test_before_add=debug").init().unwrap(); + let mut builder = sc_tracing::logging::LoggerBuilder::new("test_before_add=debug"); + builder.with_log_reloading(true); + builder.init().unwrap(); + for line in std::io::stdin().lock().lines() { let line = line.expect("Failed to read bytes"); if line.contains("add_reload") { diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 5120cc8f4dfaa..589d7848a5b28 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. Manages communication between them." 
readme = "README.md" @@ -31,7 +31,7 @@ parking_lot = "0.11.1" log = "0.4.11" futures-timer = "3.0.1" exit-future = "0.2.0" -pin-project = "1.0.4" +pin-project = "1.0.8" hash-db = "0.15.2" serde = "1.0.126" serde_json = "1.0.68" @@ -73,7 +73,7 @@ sc-offchain = { version = "4.0.0-dev", path = "../offchain" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0" } sc-tracing = { version = "4.0.0-dev", path = "../tracing" } sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } -tracing = "0.1.25" +tracing = "0.1.29" tracing-futures = { version = "0.2.4" } parity-util-mem = { version = "0.10.0", default-features = false, features = [ "primitive-types", @@ -86,4 +86,4 @@ directories = "3.0.2" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime/" } -async-std = { version = "1.6.5", default-features = false } +async-std = { version = "1.10.0", default-features = false } diff --git a/client/service/src/chain_ops/check_block.rs b/client/service/src/chain_ops/check_block.rs index 4728e014540ee..5e2a9faaf0c4f 100644 --- a/client/service/src/chain_ops/check_block.rs +++ b/client/service/src/chain_ops/check_block.rs @@ -19,7 +19,7 @@ use crate::error::Error; use codec::Encode; use futures::{future, prelude::*}; -use sc_client_api::{BlockBackend, UsageProvider}; +use sc_client_api::{BlockBackend, HeaderBackend}; use sc_consensus::import_queue::ImportQueue; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; @@ -33,7 +33,7 @@ pub fn check_block( block_id: BlockId, ) -> Pin> + Send>> where - C: BlockBackend + UsageProvider + Send + Sync + 'static, + C: BlockBackend + HeaderBackend + Send + Sync + 'static, B: BlockT + for<'de> serde::Deserialize<'de>, IQ: ImportQueue + 'static, { diff --git a/client/service/src/chain_ops/import_blocks.rs 
b/client/service/src/chain_ops/import_blocks.rs index 1ba9e0bd61444..a408a06a8170e 100644 --- a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -22,7 +22,7 @@ use futures::{future, prelude::*}; use futures_timer::Delay; use log::{info, warn}; use sc_chain_spec::ChainSpec; -use sc_client_api::UsageProvider; +use sc_client_api::HeaderBackend; use sc_consensus::import_queue::{ BlockImportError, BlockImportStatus, ImportQueue, IncomingBlock, Link, }; @@ -296,7 +296,7 @@ pub fn import_blocks( binary: bool, ) -> Pin> + Send>> where - C: UsageProvider + Send + Sync + 'static, + C: HeaderBackend + Send + Sync + 'static, B: BlockT + for<'de> serde::Deserialize<'de>, IQ: ImportQueue + 'static, { @@ -438,7 +438,7 @@ where info!( "🎉 Imported {} blocks. Best: #{}", read_block_count, - client.usage_info().chain.best_number + client.info().best_number ); return Poll::Ready(Ok(())) } else { @@ -469,7 +469,7 @@ where queue.poll_actions(cx, &mut link); - let best_number = client.usage_info().chain.best_number; + let best_number = client.info().best_number; speedometer.notify_user(best_number); if link.has_error { diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index 9b8774ce6d497..d7a8b6f227e8f 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -41,7 +41,7 @@ use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; pub struct LocalCallExecutor { backend: Arc, executor: E, - wasm_override: Option>, + wasm_override: Option, wasm_substitutes: WasmSubstitutes, spawn_handle: Box, client_config: ClientConfig, @@ -62,7 +62,7 @@ where let wasm_override = client_config .wasm_runtime_overrides .as_ref() - .map(|p| WasmOverride::new(p.clone(), executor.clone())) + .map(|p| WasmOverride::new(p.clone(), &executor)) .transpose()?; let wasm_substitutes = WasmSubstitutes::new( @@ -371,7 +371,7 @@ mod tests { 1, ); - let overrides = 
crate::client::wasm_override::dummy_overrides(&executor); + let overrides = crate::client::wasm_override::dummy_overrides(); let onchain_code = WrappedRuntimeCode(substrate_test_runtime::wasm_binary_unwrap().into()); let onchain_code = RuntimeCode { code_fetcher: &onchain_code, diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index f7d93d036a3fa..d35c0462b8b05 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -684,8 +684,6 @@ where .. } = import_block; - assert!(justifications.is_some() && finalized || justifications.is_none()); - if !intermediates.is_empty() { return Err(Error::IncompletePipeline) } @@ -779,11 +777,17 @@ where } let info = self.backend.blockchain().info(); + let gap_block = info + .block_gap + .map_or(false, |(start, _)| *import_headers.post().number() == start); + + assert!(justifications.is_some() && finalized || justifications.is_none() || gap_block); // the block is lower than our last finalized block so it must revert // finality, refusing import. 
if status == blockchain::BlockStatus::Unknown && - *import_headers.post().number() <= info.finalized_number + *import_headers.post().number() <= info.finalized_number && + !gap_block { return Err(sp_blockchain::Error::NotInFinalizedChain) } @@ -854,12 +858,13 @@ where None => None, }; - let is_new_best = finalized || - match fork_choice { - ForkChoiceStrategy::LongestChain => - import_headers.post().number() > &info.best_number, - ForkChoiceStrategy::Custom(v) => v, - }; + let is_new_best = !gap_block && + (finalized || + match fork_choice { + ForkChoiceStrategy::LongestChain => + import_headers.post().number() > &info.best_number, + ForkChoiceStrategy::Custom(v) => v, + }); let leaf_state = if finalized { NewBlockState::Final diff --git a/client/service/src/client/wasm_override.rs b/client/service/src/client/wasm_override.rs index 6d5a071269d4d..3d28467a9cbd9 100644 --- a/client/service/src/client/wasm_override.rs +++ b/client/service/src/client/wasm_override.rs @@ -104,22 +104,19 @@ impl From for sp_blockchain::Error { /// Scrapes WASM from a folder and returns WASM from that folder /// if the runtime spec version matches. #[derive(Clone, Debug)] -pub struct WasmOverride { +pub struct WasmOverride { // Map of runtime spec version -> Wasm Blob overrides: HashMap, - executor: E, } -impl WasmOverride -where - E: RuntimeVersionOf + Clone + 'static, -{ - pub fn new

(path: P, executor: E) -> Result +impl WasmOverride { + pub fn new(path: P, executor: &E) -> Result where P: AsRef, + E: RuntimeVersionOf, { - let overrides = Self::scrape_overrides(path.as_ref(), &executor)?; - Ok(Self { overrides, executor }) + let overrides = Self::scrape_overrides(path.as_ref(), executor)?; + Ok(Self { overrides }) } /// Gets an override by it's runtime spec version. @@ -131,7 +128,10 @@ where /// Scrapes a folder for WASM runtimes. /// Returns a hashmap of the runtime version and wasm runtime code. - fn scrape_overrides(dir: &Path, executor: &E) -> Result> { + fn scrape_overrides(dir: &Path, executor: &E) -> Result> + where + E: RuntimeVersionOf, + { let handle_err = |e: std::io::Error| -> sp_blockchain::Error { WasmOverrideError::Io(dir.to_owned(), e).into() }; @@ -176,11 +176,14 @@ where Ok(overrides) } - fn runtime_version( + fn runtime_version( executor: &E, code: &WasmBlob, heap_pages: Option, - ) -> Result { + ) -> Result + where + E: RuntimeVersionOf, + { let mut ext = BasicExternalities::default(); executor .runtime_version(&mut ext, &code.runtime_code(heap_pages)) @@ -190,15 +193,12 @@ where /// Returns a WasmOverride struct filled with dummy data for testing. 
#[cfg(test)] -pub fn dummy_overrides(executor: &E) -> WasmOverride -where - E: RuntimeVersionOf + Clone + 'static, -{ +pub fn dummy_overrides() -> WasmOverride { let mut overrides = HashMap::new(); overrides.insert(0, WasmBlob::new(vec![0, 0, 0, 0, 0, 0, 0, 0])); overrides.insert(1, WasmBlob::new(vec![1, 1, 1, 1, 1, 1, 1, 1])); overrides.insert(2, WasmBlob::new(vec![2, 2, 2, 2, 2, 2, 2, 2])); - WasmOverride { overrides, executor: executor.clone() } + WasmOverride { overrides } } #[cfg(test)] diff --git a/client/service/src/config.rs b/client/service/src/config.rs index a98a34b473cee..d3bd4e4e48793 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -97,6 +97,8 @@ pub struct Configuration { pub rpc_methods: RpcMethods, /// Maximum payload of rpc request/responses. pub rpc_max_payload: Option, + /// Maximum size of the output buffer capacity for websocket connections. + pub ws_max_out_buffer_capacity: Option, /// Prometheus endpoint configuration. `None` if disabled. pub prometheus_config: Option, /// Telemetry service URL. `None` if disabled. @@ -118,8 +120,6 @@ pub struct Configuration { pub dev_key_seed: Option, /// Tracing targets pub tracing_targets: Option, - /// Is log filter reloading disabled - pub disable_log_reloading: bool, /// Tracing receiver pub tracing_receiver: sc_tracing::TracingReceiver, /// The size of the instances cache. 
diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 7284747424aa9..8d8c54cc25f29 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -424,6 +424,7 @@ fn start_rpc_servers< ), )?, config.rpc_max_payload, + config.ws_max_out_buffer_capacity, server_metrics.clone(), config.tokio_handle.clone(), ) diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index 85a6dcc9e8b29..aeee4a5f90728 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -5,14 +5,15 @@ authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" publish = false -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] -hex-literal = "0.3.1" +hex = "0.4" +hex-literal = "0.3.3" tempfile = "3.1.0" tokio = { version = "1.10.0", features = ["time"] } log = "0.4.8" @@ -40,5 +41,5 @@ sc-client-api = { version = "4.0.0-dev", path = "../../api" } sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-executor = { version = "0.10.0-dev", path = "../../executor" } sp-panic-handler = { version = "3.0.0", path = "../../../primitives/panic-handler" } -parity-scale-codec = "2.0.0" +parity-scale-codec = "2.3.1" sp-tracing = { version = "4.0.0-dev", path = "../../../primitives/tracing" } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 295e941f7ceb1..8ea605c0ea5be 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -481,25 +481,7 @@ fn best_containing_with_genesis_block() { assert_eq!( genesis_hash.clone(), - block_on(longest_chain_select.finality_target(genesis_hash.clone(), None)) - .unwrap() - .unwrap(), - ); -} - -#[test] -fn best_containing_with_hash_not_found() { - // block tree: - // G - - let (client, 
longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - let uninserted_block = client.new_block(Default::default()).unwrap().build().unwrap().block; - - assert_eq!( - None, - block_on(longest_chain_select.finality_target(uninserted_block.hash().clone(), None)) - .unwrap(), + block_on(longest_chain_select.finality_target(genesis_hash.clone(), None)).unwrap(), ); } @@ -675,22 +657,10 @@ fn best_containing_on_longest_chain_with_single_chain_3_blocks() { assert_eq!( a2.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a2.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a2.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), None)) - .unwrap() - .unwrap() + block_on(longest_chain_select.finality_target(genesis_hash, None)).unwrap() ); + assert_eq!(a2.hash(), block_on(longest_chain_select.finality_target(a1.hash(), None)).unwrap()); + assert_eq!(a2.hash(), block_on(longest_chain_select.finality_target(a2.hash(), None)).unwrap()); } #[test] @@ -819,343 +789,101 @@ fn best_containing_on_longest_chain_with_multiple_forks() { assert!(leaves.contains(&d2.hash())); assert_eq!(leaves.len(), 4); + let finality_target = |target_hash, number| { + block_on(longest_chain_select.finality_target(target_hash, number)).unwrap() + }; + // search without restriction - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a3.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - 
block_on(longest_chain_select.finality_target(a4.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a5.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b2.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b3.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b4.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - c3.hash(), - block_on(longest_chain_select.finality_target(c3.hash(), None)) - .unwrap() - .unwrap() - ); - assert_eq!( - d2.hash(), - block_on(longest_chain_select.finality_target(d2.hash(), None)) - .unwrap() - .unwrap() - ); + assert_eq!(a5.hash(), finality_target(genesis_hash, None)); + assert_eq!(a5.hash(), finality_target(a1.hash(), None)); + assert_eq!(a5.hash(), finality_target(a2.hash(), None)); + assert_eq!(a5.hash(), finality_target(a3.hash(), None)); + assert_eq!(a5.hash(), finality_target(a4.hash(), None)); + assert_eq!(a5.hash(), finality_target(a5.hash(), None)); + assert_eq!(b4.hash(), finality_target(b2.hash(), None)); + assert_eq!(b4.hash(), finality_target(b3.hash(), None)); + assert_eq!(b4.hash(), finality_target(b4.hash(), None)); + assert_eq!(c3.hash(), finality_target(c3.hash(), None)); + assert_eq!(d2.hash(), finality_target(d2.hash(), None)); // search only blocks with number <= 5. 
equivalent to without restriction for this scenario - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a3.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a4.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - a5.hash(), - block_on(longest_chain_select.finality_target(a5.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b2.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b3.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b4.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - c3.hash(), - block_on(longest_chain_select.finality_target(c3.hash(), Some(5))) - .unwrap() - .unwrap() - ); - assert_eq!( - d2.hash(), - block_on(longest_chain_select.finality_target(d2.hash(), Some(5))) - .unwrap() - .unwrap() - ); + assert_eq!(a5.hash(), finality_target(genesis_hash, Some(5))); + assert_eq!(a5.hash(), finality_target(a1.hash(), Some(5))); + assert_eq!(a5.hash(), finality_target(a2.hash(), Some(5))); + assert_eq!(a5.hash(), finality_target(a3.hash(), Some(5))); + assert_eq!(a5.hash(), finality_target(a4.hash(), Some(5))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(5))); + assert_eq!(b4.hash(), finality_target(b2.hash(), Some(5))); + assert_eq!(b4.hash(), finality_target(b3.hash(), Some(5))); + assert_eq!(b4.hash(), finality_target(b4.hash(), Some(5))); + 
assert_eq!(c3.hash(), finality_target(c3.hash(), Some(5))); + assert_eq!(d2.hash(), finality_target(d2.hash(), Some(5))); // search only blocks with number <= 4 - assert_eq!( - a4.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - a4.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - a4.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - a4.hash(), - block_on(longest_chain_select.finality_target(a3.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - a4.hash(), - block_on(longest_chain_select.finality_target(a4.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(4))).unwrap()); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b2.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b3.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - b4.hash(), - block_on(longest_chain_select.finality_target(b4.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - c3.hash(), - block_on(longest_chain_select.finality_target(c3.hash(), Some(4))) - .unwrap() - .unwrap() - ); - assert_eq!( - d2.hash(), - block_on(longest_chain_select.finality_target(d2.hash(), Some(4))) - .unwrap() - .unwrap() - ); + assert_eq!(a4.hash(), finality_target(genesis_hash, Some(4))); + assert_eq!(a4.hash(), finality_target(a1.hash(), Some(4))); + assert_eq!(a4.hash(), finality_target(a2.hash(), Some(4))); + assert_eq!(a4.hash(), finality_target(a3.hash(), Some(4))); + assert_eq!(a4.hash(), finality_target(a4.hash(), Some(4))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(4))); + assert_eq!(b4.hash(), finality_target(b2.hash(), Some(4))); + assert_eq!(b4.hash(), finality_target(b3.hash(), 
Some(4))); + assert_eq!(b4.hash(), finality_target(b4.hash(), Some(4))); + assert_eq!(c3.hash(), finality_target(c3.hash(), Some(4))); + assert_eq!(d2.hash(), finality_target(d2.hash(), Some(4))); // search only blocks with number <= 3 - assert_eq!( - a3.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!( - a3.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!( - a3.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!( - a3.hash(), - block_on(longest_chain_select.finality_target(a3.hash(), Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(3))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(3))).unwrap()); - assert_eq!( - b3.hash(), - block_on(longest_chain_select.finality_target(b2.hash(), Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!( - b3.hash(), - block_on(longest_chain_select.finality_target(b3.hash(), Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(3))).unwrap()); - assert_eq!( - c3.hash(), - block_on(longest_chain_select.finality_target(c3.hash(), Some(3))) - .unwrap() - .unwrap() - ); - assert_eq!( - d2.hash(), - block_on(longest_chain_select.finality_target(d2.hash(), Some(3))) - .unwrap() - .unwrap() - ); + assert_eq!(a3.hash(), finality_target(genesis_hash, Some(3))); + assert_eq!(a3.hash(), finality_target(a1.hash(), Some(3))); + assert_eq!(a3.hash(), finality_target(a2.hash(), Some(3))); + assert_eq!(a3.hash(), finality_target(a3.hash(), Some(3))); + assert_eq!(a4.hash(), finality_target(a4.hash(), Some(3))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(3))); + assert_eq!(b3.hash(), finality_target(b2.hash(), Some(3))); + assert_eq!(b3.hash(), 
finality_target(b3.hash(), Some(3))); + assert_eq!(b4.hash(), finality_target(b4.hash(), Some(3))); + assert_eq!(c3.hash(), finality_target(c3.hash(), Some(3))); + assert_eq!(d2.hash(), finality_target(d2.hash(), Some(3))); // search only blocks with number <= 2 - assert_eq!( - a2.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(2))) - .unwrap() - .unwrap() - ); - assert_eq!( - a2.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), Some(2))) - .unwrap() - .unwrap() - ); - assert_eq!( - a2.hash(), - block_on(longest_chain_select.finality_target(a2.hash(), Some(2))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(2))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(2))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(2))).unwrap()); - assert_eq!( - b2.hash(), - block_on(longest_chain_select.finality_target(b2.hash(), Some(2))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(2))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(2))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(c3.hash(), Some(2))).unwrap()); - assert_eq!( - d2.hash(), - block_on(longest_chain_select.finality_target(d2.hash(), Some(2))) - .unwrap() - .unwrap() - ); + assert_eq!(a2.hash(), finality_target(genesis_hash, Some(2))); + assert_eq!(a2.hash(), finality_target(a1.hash(), Some(2))); + assert_eq!(a2.hash(), finality_target(a2.hash(), Some(2))); + assert_eq!(a3.hash(), finality_target(a3.hash(), Some(2))); + assert_eq!(a4.hash(), finality_target(a4.hash(), Some(2))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(2))); + assert_eq!(b2.hash(), finality_target(b2.hash(), Some(2))); + assert_eq!(b3.hash(), finality_target(b3.hash(), Some(2))); + assert_eq!(b4.hash(), 
finality_target(b4.hash(), Some(2))); + assert_eq!(c3.hash(), finality_target(c3.hash(), Some(2))); + assert_eq!(d2.hash(), finality_target(d2.hash(), Some(2))); // search only blocks with number <= 1 - assert_eq!( - a1.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(1))) - .unwrap() - .unwrap() - ); - assert_eq!( - a1.hash(), - block_on(longest_chain_select.finality_target(a1.hash(), Some(1))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(a2.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(1))).unwrap()); - - assert_eq!(None, block_on(longest_chain_select.finality_target(b2.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(c3.hash(), Some(1))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(d2.hash(), Some(1))).unwrap()); + assert_eq!(a1.hash(), finality_target(genesis_hash, Some(1))); + assert_eq!(a1.hash(), finality_target(a1.hash(), Some(1))); + assert_eq!(a2.hash(), finality_target(a2.hash(), Some(1))); + assert_eq!(a3.hash(), finality_target(a3.hash(), Some(1))); + assert_eq!(a4.hash(), finality_target(a4.hash(), Some(1))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(1))); + + assert_eq!(b2.hash(), finality_target(b2.hash(), Some(1))); + assert_eq!(b3.hash(), finality_target(b3.hash(), Some(1))); + assert_eq!(b4.hash(), finality_target(b4.hash(), Some(1))); + assert_eq!(c3.hash(), finality_target(c3.hash(), Some(1))); + assert_eq!(d2.hash(), 
finality_target(d2.hash(), Some(1))); // search only blocks with number <= 0 - assert_eq!( - genesis_hash, - block_on(longest_chain_select.finality_target(genesis_hash, Some(0))) - .unwrap() - .unwrap() - ); - assert_eq!(None, block_on(longest_chain_select.finality_target(a1.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a2.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a3.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a4.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(a5.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b2.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b3.hash(), Some(0))).unwrap()); - assert_eq!(None, block_on(longest_chain_select.finality_target(b4.hash(), Some(0))).unwrap()); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(c3.hash().clone(), Some(0))).unwrap(), - ); - assert_eq!( - None, - block_on(longest_chain_select.finality_target(d2.hash().clone(), Some(0))).unwrap(), - ); + assert_eq!(genesis_hash, finality_target(genesis_hash, Some(0))); + assert_eq!(a1.hash(), finality_target(a1.hash(), Some(0))); + assert_eq!(a2.hash(), finality_target(a2.hash(), Some(0))); + assert_eq!(a3.hash(), finality_target(a3.hash(), Some(0))); + assert_eq!(a4.hash(), finality_target(a4.hash(), Some(0))); + assert_eq!(a5.hash(), finality_target(a5.hash(), Some(0))); + assert_eq!(b2.hash(), finality_target(b2.hash(), Some(0))); + assert_eq!(b3.hash(), finality_target(b3.hash(), Some(0))); + assert_eq!(b4.hash(), finality_target(b4.hash(), Some(0))); + assert_eq!(c3.hash(), finality_target(c3.hash(), Some(0))); + assert_eq!(d2.hash(), finality_target(d2.hash(), Some(0))); } #[test] @@ -1177,9 +905,7 @@ fn 
best_containing_on_longest_chain_with_max_depth_higher_than_best() { assert_eq!( a2.hash(), - block_on(longest_chain_select.finality_target(genesis_hash, Some(10))) - .unwrap() - .unwrap(), + block_on(longest_chain_select.finality_target(genesis_hash, Some(10))).unwrap(), ); } @@ -2033,12 +1759,21 @@ fn storage_keys_iter_works() { let res: Vec<_> = client .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) .unwrap() - .take(2) - .map(|x| x.0) + .take(8) + .map(|x| hex::encode(&x.0)) .collect(); assert_eq!( res, - [hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), hex!("3a636f6465").to_vec()] + [ + "00c232cf4e70a5e343317016dc805bf80a6a8cd8ad39958d56f99891b07851e0", + "085b2407916e53a86efeb8b72dbe338c4b341dab135252f96b6ed8022209b6cb", + "0befda6e1ca4ef40219d588a727f1271", + "1a560ecfd2a62c2b8521ef149d0804eb621050e3988ed97dca55f0d7c3e6aa34", + "1d66850d32002979d67dd29dc583af5b2ae2a1f71c1f35ad90fff122be7a3824", + "237498b98d8803334286e9f0483ef513098dd3c1c22ca21c4dc155b4ef6cc204", + "29b9db10ec5bf7907d8f74b5e60aa8140c4fbdd8127a1ee5600cb98e5ec01729", + "3a636f6465", + ] ); let res: Vec<_> = client @@ -2048,15 +1783,19 @@ fn storage_keys_iter_works() { Some(&StorageKey(hex!("3a636f6465").to_vec())), ) .unwrap() - .take(3) - .map(|x| x.0) + .take(7) + .map(|x| hex::encode(&x.0)) .collect(); assert_eq!( res, [ - hex!("3a686561707061676573").to_vec(), - hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), - hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + "3a686561707061676573", + "52008686cc27f6e5ed83a216929942f8bcd32a396f09664a5698f81371934b56", + "5348d72ac6cc66e5d8cbecc27b0e0677503b845fe2382d819f83001781788fd5", + "5c2d5fda66373dabf970e4fb13d277ce91c5233473321129d32b5a8085fa8133", + "6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081", + "66484000ed3f75c95fc7b03f39c20ca1e1011e5999278247d3b2f5e3c3273808", + "79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d", ] ); @@ 
-2069,12 +1808,18 @@ fn storage_keys_iter_works() { )), ) .unwrap() - .take(1) - .map(|x| x.0) + .take(5) + .map(|x| hex::encode(x.0)) .collect(); assert_eq!( res, - [hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()] + [ + "7d5007603a7f5dd729d51d93cf695d6465789443bb967c0d1fe270e388c96eaa", + "811ecfaadcf5f2ee1d67393247e2f71a1662d433e8ce7ff89fb0d4aa9561820b", + "a93d74caa7ec34ea1b04ce1e5c090245f867d333f0f88278a451e45299654dc5", + "a9ee1403384afbfc13f13be91ff70bfac057436212e53b9733914382ac942892", + "cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f", + ] ); } @@ -2085,12 +1830,7 @@ fn cleans_up_closed_notification_sinks_on_block_import() { // NOTE: we need to build the client here instead of using the client // provided by test_runtime_client otherwise we can't access the private // `import_notification_sinks` and `finality_notification_sinks` fields. - let mut client = new_in_mem::< - _, - substrate_test_runtime_client::runtime::Block, - _, - substrate_test_runtime_client::runtime::RuntimeApi, - >( + let mut client = new_in_mem::<_, Block, _, RuntimeApi>( substrate_test_runtime_client::new_native_executor(), &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), None, @@ -2108,8 +1848,8 @@ fn cleans_up_closed_notification_sinks_on_block_import() { in_mem::Backend, sc_executor::NativeElseWasmExecutor, >, - substrate_test_runtime_client::runtime::Block, - substrate_test_runtime_client::runtime::RuntimeApi, + Block, + RuntimeApi, >; let import_notif1 = client.import_notification_stream(); diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 8000c536cdf93..a4e740aabc18e 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -261,6 +261,7 @@ fn node_config< rpc_cors: None, rpc_methods: Default::default(), rpc_max_payload: None, + ws_max_out_buffer_capacity: None, prometheus_config: None, telemetry_endpoints: None, default_heap_pages: None, 
@@ -274,7 +275,6 @@ fn node_config< announce_block: true, base_path: Some(BasePath::new(root)), informant_output_format: Default::default(), - disable_log_reloading: false, } } diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 93d5e1464b39b..136fe7a199f0c 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "State database maintenance. Handles canonicalization and pruning in the database." readme = "README.md" diff --git a/client/sync-state-rpc/Cargo.toml b/client/sync-state-rpc/Cargo.toml index b81fd1fd5c611..4166929ff0317 100644 --- a/client/sync-state-rpc/Cargo.toml +++ b/client/sync-state-rpc/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "A RPC handler to create sync states for light clients." 
edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index f115017f09701..502c7fc20781f 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Telemetry utils" edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-telemetry" readme = "README.md" @@ -20,7 +20,7 @@ futures = "0.3.9" wasm-timer = "0.2.5" libp2p = { version = "0.39.1", default-features = false, features = ["dns-async-std", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.8" -pin-project = "1.0.4" +pin-project = "1.0.8" rand = "0.7.2" serde = { version = "1.0.126", features = ["derive"] } serde_json = "1.0.68" diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index 3e314a82aa583..b4049fa097ff8 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Instrumentation implementation for substrate." 
readme = "README.md" @@ -15,15 +15,16 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] ansi_term = "0.12.1" atty = "0.2.13" +chrono = "0.4.19" lazy_static = "1.4.0" log = { version = "0.4.8" } -once_cell = "1.4.1" +once_cell = "1.8.0" parking_lot = "0.11.1" -regex = "1.4.2" +regex = "1.5.4" rustc-hash = "1.1.0" serde = "1.0.126" thiserror = "1.0.21" -tracing = "0.1.25" +tracing = "0.1.29" tracing-log = "0.1.2" tracing-subscriber = "0.2.19" sp-tracing = { version = "4.0.0-dev", path = "../../primitives/tracing" } @@ -35,3 +36,10 @@ sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } sc-client-api = { version = "4.0.0-dev", path = "../api" } sc-tracing-proc-macro = { version = "4.0.0-dev", path = "./proc-macro" } sc-rpc-server = { version = "4.0.0-dev", path = "../rpc-servers" } + +[dev-dependencies] +criterion = "0.3" + +[[bench]] +name = "bench" +harness = false diff --git a/client/tracing/benches/bench.rs b/client/tracing/benches/bench.rs new file mode 100644 index 0000000000000..ee218958be9b3 --- /dev/null +++ b/client/tracing/benches/bench.rs @@ -0,0 +1,49 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use criterion::{criterion_group, criterion_main, Criterion}; +use tracing_subscriber::fmt::time::{ChronoLocal, FormatTime}; + +fn bench_fast_local_time(c: &mut Criterion) { + c.bench_function("fast_local_time", |b| { + let mut buffer = String::new(); + let t = sc_tracing::logging::FastLocalTime { with_fractional: true }; + b.iter(|| { + buffer.clear(); + t.format_time(&mut buffer).unwrap(); + }) + }); +} + +// This is here just as a point of comparison. +fn bench_chrono_local(c: &mut Criterion) { + c.bench_function("chrono_local", |b| { + let mut buffer = String::new(); + let t = ChronoLocal::with_format("%Y-%m-%d %H:%M:%S%.3f".to_string()); + b.iter(|| { + buffer.clear(); + t.format_time(&mut buffer).unwrap(); + }) + }); +} + +criterion_group! { + name = benches; + config = Criterion::default(); + targets = bench_fast_local_time, bench_chrono_local +} +criterion_main!(benches); diff --git a/client/tracing/proc-macro/Cargo.toml b/client/tracing/proc-macro/Cargo.toml index 002370b515f28..5cc2d836dcada 100644 --- a/client/tracing/proc-macro/Cargo.toml +++ b/client/tracing/proc-macro/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Helper macros for Substrate's client CLI" @@ -17,5 +17,5 @@ proc-macro = true [dependencies] proc-macro-crate = "1.0.0" proc-macro2 = "1.0.29" -quote = { version = "1.0.3", features = ["proc-macro"] } -syn = { version = "1.0.58", features = ["proc-macro", "full", "extra-traits", "parsing"] } +quote = { version = "1.0.10", features = ["proc-macro"] } +syn = { version = "1.0.80", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/client/tracing/src/logging/directives.rs b/client/tracing/src/logging/directives.rs index 5aaeb4d17e7d3..16f68654de1eb 100644 --- a/client/tracing/src/logging/directives.rs 
+++ b/client/tracing/src/logging/directives.rs @@ -17,8 +17,7 @@ use once_cell::sync::OnceCell; use parking_lot::Mutex; use tracing_subscriber::{ - filter::Directive, fmt as tracing_fmt, fmt::time::ChronoLocal, layer, reload::Handle, - EnvFilter, Registry, + filter::Directive, fmt as tracing_fmt, layer, reload::Handle, EnvFilter, Registry, }; // Handle to reload the tracing log filter @@ -109,6 +108,6 @@ pub(crate) fn set_reload_handle(handle: Handle) { // Used in the reload `Handle`. type SCSubscriber< N = tracing_fmt::format::DefaultFields, - E = crate::logging::EventFormat, + E = crate::logging::EventFormat, W = fn() -> std::io::Stderr, > = layer::Layered, Registry>; diff --git a/client/tracing/src/logging/event_format.rs b/client/tracing/src/logging/event_format.rs index 61d7fe77aec68..5a21192d69c4d 100644 --- a/client/tracing/src/logging/event_format.rs +++ b/client/tracing/src/logging/event_format.rs @@ -16,6 +16,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use crate::logging::fast_local_time::FastLocalTime; use ansi_term::Colour; use regex::Regex; use std::fmt::{self, Write}; @@ -23,16 +24,13 @@ use tracing::{Event, Level, Subscriber}; use tracing_log::NormalizeEvent; use tracing_subscriber::{ field::RecordFields, - fmt::{ - time::{FormatTime, SystemTime}, - FmtContext, FormatEvent, FormatFields, - }, + fmt::{time::FormatTime, FmtContext, FormatEvent, FormatFields}, layer::Context, registry::{LookupSpan, SpanRef}, }; /// A pre-configured event formatter. -pub struct EventFormat { +pub struct EventFormat { /// Use the given timer for log message timestamps. pub timer: T, /// Sets whether or not an event's target is displayed. 
diff --git a/client/tracing/src/logging/fast_local_time.rs b/client/tracing/src/logging/fast_local_time.rs new file mode 100644 index 0000000000000..288e44aa5e3af --- /dev/null +++ b/client/tracing/src/logging/fast_local_time.rs @@ -0,0 +1,160 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +use chrono::{Datelike, Timelike}; +use std::{cell::RefCell, fmt::Write, time::SystemTime}; +use tracing_subscriber::fmt::time::FormatTime; + +/// A structure which, when `Display`d, will print out the current local time. +#[derive(Debug, Clone, Copy, Eq, PartialEq, Default)] +pub struct FastLocalTime { + /// Decides whether the fractional timestamp will be included in the output. + /// + /// If `false` the output will match the following `chrono` format string: + /// `%Y-%m-%d %H:%M:%S` + /// + /// If `true` the output will match the following `chrono` format string: + /// `%Y-%m-%d %H:%M:%S%.3f` + pub with_fractional: bool, +} + +// This is deliberately slightly larger than we actually need, just in case.
+const TIMESTAMP_MAXIMUM_LENGTH: usize = 32; + +#[derive(Default)] +struct InlineString { + buffer: [u8; TIMESTAMP_MAXIMUM_LENGTH], + length: usize, +} + +impl Write for InlineString { + fn write_str(&mut self, s: &str) -> std::fmt::Result { + let new_length = self.length + s.len(); + assert!( + new_length <= TIMESTAMP_MAXIMUM_LENGTH, + "buffer overflow when formatting the current timestamp" + ); + + self.buffer[self.length..new_length].copy_from_slice(s.as_bytes()); + self.length = new_length; + Ok(()) + } +} + +impl InlineString { + fn as_str(&self) -> &str { + // SAFETY: this is safe since the only place we append to the buffer + // is in `write_str` from an `&str` + unsafe { std::str::from_utf8_unchecked(&self.buffer[..self.length]) } + } +} + +#[derive(Default)] +struct CachedTimestamp { + buffer: InlineString, + last_regenerated_at: u64, + last_fractional: u32, +} + +thread_local! { + static TIMESTAMP: RefCell = Default::default(); +} + +impl FormatTime for FastLocalTime { + fn format_time(&self, w: &mut dyn Write) -> std::fmt::Result { + const TIMESTAMP_PARTIAL_LENGTH: usize = "0000-00-00 00:00:00".len(); + + let elapsed = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("system time is never before UNIX epoch; qed"); + let unix_time = elapsed.as_secs(); + + TIMESTAMP.with(|cache| { + let mut cache = cache.borrow_mut(); + + // Regenerate the timestamp only at most once each second. 
+ if cache.last_regenerated_at != unix_time { + let ts = chrono::Local::now(); + let fractional = (ts.nanosecond() % 1_000_000_000) / 1_000_000; + cache.last_regenerated_at = unix_time; + cache.last_fractional = fractional; + cache.buffer.length = 0; + + write!( + &mut cache.buffer, + "{:04}-{:02}-{:02} {:02}:{:02}:{:02}.{:03}", + ts.year(), + ts.month(), + ts.day(), + ts.hour(), + ts.minute(), + ts.second(), + fractional + )?; + } else if self.with_fractional { + let fractional = elapsed.subsec_millis(); + + // Regenerate the fractional part at most once each millisecond. + if cache.last_fractional != fractional { + cache.last_fractional = fractional; + cache.buffer.length = TIMESTAMP_PARTIAL_LENGTH + 1; + write!(&mut cache.buffer, "{:03}", fractional)?; + } + } + + let mut slice = cache.buffer.as_str(); + if !self.with_fractional { + slice = &slice[..TIMESTAMP_PARTIAL_LENGTH]; + } + + w.write_str(slice) + }) + } +} + +impl std::fmt::Display for FastLocalTime { + fn fmt(&self, w: &mut std::fmt::Formatter) -> std::fmt::Result { + self.format_time(w) + } +} + +#[test] +fn test_format_fast_local_time() { + assert_eq!( + chrono::Local::now().format("%Y-%m-%d %H:%M:%S").to_string().len(), + FastLocalTime { with_fractional: false }.to_string().len() + ); + assert_eq!( + chrono::Local::now().format("%Y-%m-%d %H:%M:%S%.3f").to_string().len(), + FastLocalTime { with_fractional: true }.to_string().len() + ); + + // A simple trick to make sure this test won't randomly fail if we so happen + // to land on the exact moment when we tick over to the next second. 
+ let now_1 = FastLocalTime { with_fractional: false }.to_string(); + let expected = chrono::Local::now().format("%Y-%m-%d %H:%M:%S").to_string(); + let now_2 = FastLocalTime { with_fractional: false }.to_string(); + + assert!( + now_1 == expected || now_2 == expected, + "'{}' or '{}' should have been equal to '{}'", + now_1, + now_2, + expected + ); +} diff --git a/client/tracing/src/logging/mod.rs b/client/tracing/src/logging/mod.rs index dd4830fe89752..c6a4f070176e8 100644 --- a/client/tracing/src/logging/mod.rs +++ b/client/tracing/src/logging/mod.rs @@ -24,6 +24,7 @@ mod directives; mod event_format; +mod fast_local_time; mod layers; pub use directives::*; @@ -34,8 +35,8 @@ use tracing::Subscriber; use tracing_subscriber::{ filter::LevelFilter, fmt::{ - format, time::ChronoLocal, FormatEvent, FormatFields, Formatter, Layer as FmtLayer, - MakeWriter, SubscriberBuilder, + format, FormatEvent, FormatFields, Formatter, Layer as FmtLayer, MakeWriter, + SubscriberBuilder, }, layer::{self, SubscriberExt}, registry::LookupSpan, @@ -43,6 +44,7 @@ use tracing_subscriber::{ }; pub use event_format::*; +pub use fast_local_time::FastLocalTime; pub use layers::*; /// Logging Result typedef. 
@@ -89,12 +91,7 @@ fn prepare_subscriber( profiling_targets: Option<&str>, force_colors: Option, builder_hook: impl Fn( - SubscriberBuilder< - format::DefaultFields, - EventFormat, - EnvFilter, - fn() -> std::io::Stderr, - >, + SubscriberBuilder std::io::Stderr>, ) -> SubscriberBuilder, ) -> Result LookupSpan<'a>> where @@ -161,11 +158,7 @@ where }; let enable_color = force_colors.unwrap_or_else(|| atty::is(atty::Stream::Stderr)); - let timer = ChronoLocal::with_format(if simple { - "%Y-%m-%d %H:%M:%S".to_string() - } else { - "%Y-%m-%d %H:%M:%S%.3f".to_string() - }); + let timer = fast_local_time::FastLocalTime { with_fractional: !simple }; let event_format = EventFormat { timer, @@ -204,7 +197,7 @@ impl LoggerBuilder { Self { directives: directives.into(), profiling: None, - log_reloading: true, + log_reloading: false, force_colors: None, } } diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 2184af819adf7..12642559a3b8d 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate transaction pool implementation." 
readme = "README.md" @@ -32,7 +32,7 @@ sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sc-utils = { version = "4.0.0-dev", path = "../utils" } serde = { version = "1.0.126", features = ["derive"] } linked-hash-map = "0.5.4" -retain_mut = "0.1.3" +retain_mut = "0.1.4" [dev-dependencies] assert_matches = "1.3.0" diff --git a/client/transaction-pool/api/Cargo.toml b/client/transaction-pool/api/Cargo.toml index efef36071f083..7dd1a6724ce59 100644 --- a/client/transaction-pool/api/Cargo.toml +++ b/client/transaction-pool/api/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Transaction pool client facing API." diff --git a/client/transaction-pool/api/src/lib.rs b/client/transaction-pool/api/src/lib.rs index a6252f1373c5d..cd8784bfc83e2 100644 --- a/client/transaction-pool/api/src/lib.rs +++ b/client/transaction-pool/api/src/lib.rs @@ -223,13 +223,14 @@ pub trait TransactionPool: Send + Sync { at: NumberFor, ) -> Pin< Box< - dyn Future> + Send>> - + Send, + dyn Future< + Output = Box> + Send>, + > + Send, >, >; /// Get an iterator for ready transactions ordered by priority. - fn ready(&self) -> Box> + Send>; + fn ready(&self) -> Box> + Send>; // *** Block production /// Remove transactions identified by given hashes (and dependent transactions) from the pool. @@ -254,6 +255,27 @@ pub trait TransactionPool: Send + Sync { fn ready_transaction(&self, hash: &TxHash) -> Option>; } +/// An iterator of ready transactions. +/// +/// The trait extends regular [`std::iter::Iterator`] trait and allows reporting +/// last-returned element as invalid. +/// +/// The implementation is then allowed, for performance reasons, to change the elements +/// returned next, by e.g. 
skipping elements that are known to depend on the reported +/// transaction, which yields them invalid as well. +pub trait ReadyTransactions: Iterator { + /// Report given transaction as invalid. + /// + /// This might affect subsequent elements returned by the iterator, so dependent transactions + /// are skipped for performance reasons. + fn report_invalid(&mut self, _tx: &Self::Item); +} + +/// A no-op implementation for an empty iterator. +impl ReadyTransactions for std::iter::Empty { + fn report_invalid(&mut self, _tx: &T) {} +} + /// Events that the transaction pool listens for. pub enum ChainEvent { /// New best block have been added to the chain diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml deleted file mode 100644 index b49cadc51c33c..0000000000000 --- a/client/transaction-pool/graph/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -[package] -name = "sc-transaction-graph" -version = "4.0.0-dev" -authors = ["Parity Technologies "] -edition = "2018" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage = "https://substrate.dev" -repository = "https://github.com/paritytech/substrate/" -description = "Generic Transaction Pool" -readme = "README.md" - -[package.metadata.docs.rs] -targets = ["x86_64-unknown-linux-gnu"] - -[dependencies] -derive_more = "0.99.2" -thiserror = "1.0.21" -futures = "0.3.9" -log = "0.4.8" -parking_lot = "0.11.1" -serde = { version = "1.0.101", features = ["derive"] } -sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } -sc-utils = { version = "4.0.0-dev", path = "../../utils" } -sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } -sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "4.0.0-dev", path = "../../../primitives/transaction-pool" } -parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } -linked-hash-map = "0.5.4" 
-retain_mut = "0.1.3" - -[dev-dependencies] -assert_matches = "1.3.0" -codec = { package = "parity-scale-codec", version = "2.0.0" } -substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } -criterion = "0.3" - -[[bench]] -name = "basics" -harness = false diff --git a/client/transaction-pool/src/graph/base_pool.rs b/client/transaction-pool/src/graph/base_pool.rs index 890a87e82929d..2c8becdfb2f0b 100644 --- a/client/transaction-pool/src/graph/base_pool.rs +++ b/client/transaction-pool/src/graph/base_pool.rs @@ -36,7 +36,7 @@ use sp_runtime::{ use super::{ future::{FutureTransactions, WaitingTransaction}, - ready::ReadyTransactions, + ready::{BestIterator, ReadyTransactions}, }; /// Successful import result. @@ -355,7 +355,7 @@ impl BasePool impl Iterator>> { + pub fn ready(&self) -> BestIterator { self.ready.get() } diff --git a/client/transaction-pool/src/graph/ready.rs b/client/transaction-pool/src/graph/ready.rs index 03689aeb32e6d..92adf2e62d623 100644 --- a/client/transaction-pool/src/graph/ready.rs +++ b/client/transaction-pool/src/graph/ready.rs @@ -23,7 +23,7 @@ use std::{ sync::Arc, }; -use log::trace; +use log::{debug, trace}; use sc_transaction_pool_api::error; use serde::Serialize; use sp_runtime::{traits::Member, transaction_validity::TransactionTag as Tag}; @@ -31,7 +31,7 @@ use sp_runtime::{traits::Member, transaction_validity::TransactionTag as Tag}; use super::{ base_pool::Transaction, future::WaitingTransaction, - tracked_map::{self, ReadOnlyTrackedMap, TrackedMap}, + tracked_map::{self, TrackedMap}, }; /// An in-pool transaction reference. @@ -156,11 +156,16 @@ impl ReadyTransactions { /// - transactions that are valid for a shorter time go first /// 4. Lastly we sort by the time in the queue /// - transactions that are longer in the queue go first - pub fn get(&self) -> impl Iterator>> { + /// + /// The iterator is providing a way to report transactions that the receiver considers invalid. 
+ /// In such case the entire subgraph of transactions that depend on the reported one will be + /// skipped. + pub fn get(&self) -> BestIterator { BestIterator { - all: self.ready.clone(), + all: self.ready.clone_map(), best: self.best.clone(), awaiting: Default::default(), + invalid: Default::default(), } } @@ -479,9 +484,10 @@ impl ReadyTransactions { /// Iterator of ready transactions ordered by priority. pub struct BestIterator { - all: ReadOnlyTrackedMap>, + all: HashMap>, awaiting: HashMap)>, best: BTreeSet>, + invalid: HashSet, } impl BestIterator { @@ -498,6 +504,34 @@ impl BestIterator { } } +impl sc_transaction_pool_api::ReadyTransactions + for BestIterator +{ + fn report_invalid(&mut self, tx: &Self::Item) { + BestIterator::report_invalid(self, tx) + } +} + +impl BestIterator { + /// Report given transaction as invalid. + /// + /// As a consequence, all values that depend on the invalid one will be skipped. + /// When given transaction is not in the pool it has no effect. + /// When invoked on a fully drained iterator it has no effect either. + pub fn report_invalid(&mut self, tx: &Arc>) { + if let Some(to_report) = self.all.get(&tx.hash) { + debug!( + target: "txpool", + "[{:?}] Reported as invalid. Will skip sub-chains while iterating.", + to_report.transaction.transaction.hash + ); + for hash in &to_report.unlocks { + self.invalid.insert(hash.clone()); + } + } + } +} + impl Iterator for BestIterator { type Item = Arc>; @@ -505,9 +539,19 @@ impl Iterator for BestIterator { loop { let best = self.best.iter().next_back()?.clone(); let best = self.best.take(&best)?; + let hash = &best.transaction.hash; + + // Check if the transaction was marked invalid. 
+ if self.invalid.contains(hash) { + debug!( + target: "txpool", + "[{:?}] Skipping invalid child transaction while iterating.", + hash + ); + continue + } - let next = self.all.read().get(&best.transaction.hash).cloned(); - let ready = match next { + let ready = match self.all.get(&hash).cloned() { Some(ready) => ready, // The transaction is not in all, maybe it was removed in the meantime? None => continue, @@ -522,7 +566,6 @@ impl Iterator for BestIterator { // then get from the pool } else { self.all - .read() .get(hash) .map(|next| (next.requires_offset + 1, next.transaction.clone())) }; @@ -635,10 +678,13 @@ mod tests { assert_eq!(ready.get().count(), 3); } - #[test] - fn should_return_best_transactions_in_correct_order() { - // given - let mut ready = ReadyTransactions::default(); + /// Populate the pool, with a graph that looks like so: + /// + /// tx1 -> tx2 \ + /// -> -> tx3 + /// -> tx4 -> tx5 -> tx6 + /// -> tx7 + fn populate_pool(ready: &mut ReadyTransactions>) { let mut tx1 = tx(1); tx1.requires.clear(); let mut tx2 = tx(2); @@ -649,11 +695,17 @@ mod tests { tx3.provides = vec![]; let mut tx4 = tx(4); tx4.requires = vec![tx1.provides[0].clone()]; - tx4.provides = vec![]; - let tx5 = Transaction { - data: vec![5], + tx4.provides = vec![vec![107]]; + let mut tx5 = tx(5); + tx5.requires = vec![tx4.provides[0].clone()]; + tx5.provides = vec![vec![108]]; + let mut tx6 = tx(6); + tx6.requires = vec![tx5.provides[0].clone()]; + tx6.provides = vec![]; + let tx7 = Transaction { + data: vec![7], bytes: 1, - hash: 5, + hash: 7, priority: 1, valid_till: u64::MAX, // use the max here for testing. 
requires: vec![tx1.provides[0].clone()], @@ -663,20 +715,30 @@ mod tests { }; // when - for tx in vec![tx1, tx2, tx3, tx4, tx5] { - import(&mut ready, tx).unwrap(); + for tx in vec![tx1, tx2, tx3, tx7, tx4, tx5, tx6] { + import(ready, tx).unwrap(); } - // then assert_eq!(ready.best.len(), 1); + } + + #[test] + fn should_return_best_transactions_in_correct_order() { + // given + let mut ready = ReadyTransactions::default(); + populate_pool(&mut ready); + // when let mut it = ready.get().map(|tx| tx.data[0]); + // then assert_eq!(it.next(), Some(1)); assert_eq!(it.next(), Some(2)); assert_eq!(it.next(), Some(3)); assert_eq!(it.next(), Some(4)); assert_eq!(it.next(), Some(5)); + assert_eq!(it.next(), Some(6)); + assert_eq!(it.next(), Some(7)); assert_eq!(it.next(), None); } @@ -725,4 +787,26 @@ mod tests { TransactionRef { transaction: Arc::new(with_priority(3, 3)), insertion_id: 2 } ); } + + #[test] + fn should_skip_invalid_transactions_while_iterating() { + // given + let mut ready = ReadyTransactions::default(); + populate_pool(&mut ready); + + // when + let mut it = ready.get(); + let data = |tx: &Arc>>| tx.data[0]; + + // then + assert_eq!(it.next().as_ref().map(data), Some(1)); + assert_eq!(it.next().as_ref().map(data), Some(2)); + assert_eq!(it.next().as_ref().map(data), Some(3)); + let tx4 = it.next(); + assert_eq!(tx4.as_ref().map(data), Some(4)); + // report 4 as invalid, which should skip 5 & 6. + it.report_invalid(&tx4.unwrap()); + assert_eq!(it.next().as_ref().map(data), Some(7)); + assert_eq!(it.next().as_ref().map(data), None); + } } diff --git a/client/transaction-pool/src/graph/tracked_map.rs b/client/transaction-pool/src/graph/tracked_map.rs index c1fdda227c6ae..2f560d1c56245 100644 --- a/client/transaction-pool/src/graph/tracked_map.rs +++ b/client/transaction-pool/src/graph/tracked_map.rs @@ -57,11 +57,6 @@ impl TrackedMap { std::cmp::max(self.bytes.load(AtomicOrdering::Relaxed), 0) as usize } - /// Read-only clone of the interior. 
- pub fn clone(&self) -> ReadOnlyTrackedMap { - ReadOnlyTrackedMap(self.index.clone()) - } - /// Lock map for read. pub fn read(&self) -> TrackedMapReadAccess { TrackedMapReadAccess { inner_guard: self.index.read() } @@ -77,18 +72,10 @@ impl TrackedMap { } } -/// Read-only access to map. -/// -/// The only thing can be done is .read(). -pub struct ReadOnlyTrackedMap(Arc>>); - -impl ReadOnlyTrackedMap -where - K: Eq + std::hash::Hash, -{ - /// Lock map for read. - pub fn read(&self) -> TrackedMapReadAccess { - TrackedMapReadAccess { inner_guard: self.0.read() } +impl TrackedMap { + /// Clone the inner map. + pub fn clone_map(&self) -> HashMap { + self.index.read().clone() } } diff --git a/client/transaction-pool/src/graph/validated_pool.rs b/client/transaction-pool/src/graph/validated_pool.rs index e4aad7f342b5b..dba586adc846c 100644 --- a/client/transaction-pool/src/graph/validated_pool.rs +++ b/client/transaction-pool/src/graph/validated_pool.rs @@ -25,7 +25,7 @@ use std::{ use futures::channel::mpsc::{channel, Sender}; use parking_lot::{Mutex, RwLock}; use retain_mut::RetainMut; -use sc_transaction_pool_api::{error, PoolStatus}; +use sc_transaction_pool_api::{error, PoolStatus, ReadyTransactions}; use serde::Serialize; use sp_runtime::{ generic::BlockId, @@ -630,7 +630,7 @@ impl ValidatedPool { } /// Get an iterator for ready transactions ordered by priority - pub fn ready(&self) -> impl Iterator> + Send { + pub fn ready(&self) -> impl ReadyTransactions> + Send { self.pool.read().ready() } diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 6eb5bd2f332ec..8af73c3fe5b48 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -44,7 +44,7 @@ use futures::{ future::{self, ready}, prelude::*, }; -pub use graph::{ChainApi, Options, Pool, Transaction}; +pub use graph::{base_pool::Limit as PoolLimit, ChainApi, Options, Pool, Transaction}; use parking_lot::Mutex; use std::{ collections::{HashMap, 
HashSet}, @@ -56,7 +56,8 @@ use std::{ use graph::{ExtrinsicHash, IsValidator}; use sc_transaction_pool_api::{ ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolFuture, PoolStatus, - TransactionFor, TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, + ReadyTransactions, TransactionFor, TransactionPool, TransactionSource, + TransactionStatusStreamFor, TxHash, }; use sp_core::traits::SpawnEssentialNamed; use sp_runtime::{ @@ -69,7 +70,7 @@ use crate::metrics::MetricsLink as PrometheusMetrics; use prometheus_endpoint::Registry as PrometheusRegistry; type BoxedReadyIterator = - Box>> + Send>; + Box>> + Send>; type ReadyIteratorFor = BoxedReadyIterator, graph::ExtrinsicFor>; diff --git a/client/utils/Cargo.toml b/client/utils/Cargo.toml index 99765dd501dd5..546232cf60708 100644 --- a/client/utils/Cargo.toml +++ b/client/utils/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "I/O for Substrate runtimes" readme = "README.md" @@ -12,7 +12,7 @@ readme = "README.md" [dependencies] futures = "0.3.9" lazy_static = "1.4.0" -prometheus = { version = "0.11.0", default-features = false } +prometheus = { version = "0.12.0", default-features = false } futures-timer = "3.0.2" [features] diff --git a/docker/README.md b/docker/README.md new file mode 100644 index 0000000000000..ca3c1bde4e321 --- /dev/null +++ b/docker/README.md @@ -0,0 +1,22 @@ +# Substrate Builder Docker Image + +The Docker image in this folder is a `builder` image. It is self contained and allow users to build the binaries themselves. +There is no requirement on having Rust or any other toolchain installed but a working Docker environment. + +Unlike the `parity/polkadot` image which contains a single binary (`polkadot`!) 
used by default, the image in this folder builds and contains several binaries and you need to provide the name of the binary to be called. + +You should refer to the .Dockerfile for the actual list. At the time of editing, the list of included binaries is: + +- substrate +- subkey +- node-template +- chain-spec-builder + +The image can be used by passing the selected binary followed by the appropriate tags for this binary. + +Your best guess to get started is to pass the `--help` flag. Here are a few examples: + +- `docker run --rm -it parity/substrate substrate --version` +- `docker run --rm -it parity/substrate subkey --help` +- `docker run --rm -it parity/substrate node-template --version` +- `docker run --rm -it parity/substrate chain-spec-builder --help` diff --git a/docker/build.sh b/docker/build.sh new file mode 100755 index 0000000000000..f0a4560ff8fea --- /dev/null +++ b/docker/build.sh @@ -0,0 +1,24 @@ +#!/usr/bin/env bash +set -e + +pushd . + +# The following line ensures we run from the project root +PROJECT_ROOT=`git rev-parse --show-toplevel` +cd $PROJECT_ROOT + +# Find the current version from Cargo.toml +VERSION=`grep "^version" ./bin/node/cli/Cargo.toml | egrep -o "([0-9\.]+)"` +GITUSER=parity +GITREPO=substrate + +# Build the image +echo "Building ${GITUSER}/${GITREPO}:latest docker image, hang on!" +time docker build -f ./docker/substrate_builder.Dockerfile -t ${GITUSER}/${GITREPO}:latest . +docker tag ${GITUSER}/${GITREPO}:latest ${GITUSER}/${GITREPO}:v${VERSION} + +# Show the list of available images for this repo +echo "Image is ready" +docker images | grep ${GITREPO} + +popd diff --git a/docker/substrate_builder.Dockerfile b/docker/substrate_builder.Dockerfile new file mode 100644 index 0000000000000..d0812c1a80c40 --- /dev/null +++ b/docker/substrate_builder.Dockerfile @@ -0,0 +1,35 @@ +# This is the build stage for Substrate. Here we create the binary. +FROM docker.io/paritytech/ci-linux:production as builder + +WORKDIR /substrate +COPY .
/substrate +RUN cargo build --locked --release + +# This is the 2nd stage: a very small image where we copy the Substrate binary." +FROM docker.io/library/ubuntu:20.04 +LABEL description="Multistage Docker image for Substrate: a platform for web3" \ + io.parity.image.type="builder" \ + io.parity.image.authors="chevdor@gmail.com, devops-team@parity.io" \ + io.parity.image.vendor="Parity Technologies" \ + io.parity.image.description="Substrate is a next-generation framework for blockchain innovation 🚀" \ + io.parity.image.source="https://github.com/paritytech/polkadot/blob/${VCS_REF}/docker/substrate_builder.Dockerfile" \ + io.parity.image.documentation="https://github.com/paritytech/polkadot/" + +COPY --from=builder /substrate/target/release/substrate /usr/local/bin +COPY --from=builder /substrate/target/release/subkey /usr/local/bin +COPY --from=builder /substrate/target/release/node-template /usr/local/bin +COPY --from=builder /substrate/target/release/chain-spec-builder /usr/local/bin + +RUN useradd -m -u 1000 -U -s /bin/sh -d /substrate substrate && \ + mkdir -p /data /substrate/.local/share/substrate && \ + chown -R substrate:substrate /data && \ + ln -s /data /substrate/.local/share/substrate && \ +# unclutter and minimize the attack surface + rm -rf /usr/bin /usr/sbin && \ +# Sanity checks + ldd /usr/local/bin/substrate && \ + /usr/local/bin/substrate --version + +USER substrate +EXPOSE 30333 9933 9944 9615 +VOLUME ["/data"] diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 42d25a0a228f7..ee6382b72f1b2 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -40,6 +40,12 @@ /client/consensus/pow/ @sorpaas /primitives/consensus/pow/ @sorpaas +# BEEFY +/client/beefy/ @adoerr +/frame/beefy/ @adoerr +/frame/beefy-mmr/ @adoerr +/primitives/beefy/ @adoerr + # Contracts /frame/contracts/ @athei diff --git a/docs/CONTRIBUTING.adoc b/docs/CONTRIBUTING.adoc index b0eaec04455e4..5b1920e775bbd 100644 --- a/docs/CONTRIBUTING.adoc +++ b/docs/CONTRIBUTING.adoc @@ -42,7 
+42,7 @@ A Pull Request (PR) needs to be reviewed and approved by project maintainers unl . PRs must be tagged with their release importance via the `C1-C9` labels. . PRs must be tagged with their audit requirements via the `D1-D9` labels. . PRs that must be backported to a stable branch must be tagged with https://github.com/paritytech/substrate/labels/E0-patchthis[`E0-patchthis`]. -. PRs that introduce runtime migrations must be tagged with https://github.com/paritytech/substrate/labels/E1-runtimemigration[`E1-runtimemigration`]. +. PRs that introduce runtime migrations must be tagged with https://github.com/paritytech/substrate/labels/E1-runtimemigration[`E1-runtimemigration`]. See the https://github.com/paritytech/substrate/blob/master/utils/frame/try-runtime/cli/src/lib.rs#L18[Migration Best Practices here] for more info about how to test runtime migrations. . PRs that introduce irreversible database migrations must be tagged with https://github.com/paritytech/substrate/labels/E2-databasemigration[`E2-databasemigration`]. . PRs that add host functions must be tagged with https://github.com/paritytech/substrate/labels/E4-newhostfunctions[`E4-newhostfunctions`]. . PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/E5-breaksapi[`E5-breaksapi`]. @@ -79,17 +79,17 @@ To create a Polkadot companion PR: . Override substrate deps to point to your local path or branch using https://github.com/bkchr/diener. (E.g. from the polkadot clone dir run `diener patch --crates-to-patch ../substrate --substrate` assuming substrate clone is in a sibling dir. If you do use diener, ensure that you _do not_ commit the changes diener makes to the Cargo.tomls.) . Make the changes required and build polkadot locally. . Submit all this as a PR against the Polkadot Repo. -. Link to your Polkadot PR in the _description_ of your _Substrate_ PR as "polkadot companion: [URL]" +.
In the _description_ of your _Substrate_ PR add "polkadot companion: [Polkadot_PR_URL]" . Now you should see that the `check_polkadot` CI job will build your Substrate PR against the mentioned Polkadot branch in your PR description. . Someone will need to approve the Polkadot PR before the Substrate CI will go green. (The Polkadot CI failing can be ignored as long as the polkadot job in the _substrate_ PR is green). . Wait for reviews on both the Substrate and the Polkadot PRs. . Once the Substrate PR runs green, a member of the `parity` github group can comment on the Substrate PR with `bot merge` which will: - Merge the Substrate PR. - - The bot will push a commit to the Polkadot PR updating its Substrate reference. + - The bot will push a commit to the Polkadot PR updating its Substrate reference. (effectively doing `cargo update -p sp-io`) - If the polkadot PR originates from a fork then a project member may need to press `approve run` on the polkadot PR. - The bot will merge the Polkadot PR once all its CI `{"build_allow_failure":false}` checks are green. - - Note: The merge-bot currently doesn't work with forks on org accounts, only individual accounts. + Note: The merge-bot currently doesn't work with forks on org accounts, only individual accounts. + (Hint: it's recommended to use `bot merge` to merge all substrate PRs, not just ones with a polkadot companion.) If your PR is reviewed well, but a Polkadot PR is missing, signal it with https://github.com/paritytech/substrate/labels/A7-needspolkadotpr[`A7-needspolkadotpr`] to prevent it from getting automatically merged. diff --git a/docs/README.adoc b/docs/README.adoc index 71052420b1aa9..05f81442d4ca8 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -250,6 +250,20 @@ If you are trying to set up Substrate on Windows, you should do the following: 7.
Finally, you need to install `cmake`: https://cmake.org/download/ +==== Docker + +You can use https://github.com/paritytech/scripts/tree/master/dockerfiles/ci-linux[Parity CI docker image] with all necessary dependencies to build Substrate: + +[source, shell] +---- +#run it in the folder with the Substrate source code +docker run --rm -it -w /shellhere/substrate \ + -v $(pwd):/shellhere/substrate \ + paritytech/ci-linux:production +---- + +You can find necessary cargo commands in <> + ==== Shared Steps Then, grab the Substrate source code: diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 05e7912dd07c6..65162430a2ac1 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME asset management pallet" readme = "README.md" diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index 43eadffbe8497..d9de9ed3dedd4 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -21,8 +21,7 @@ use super::*; use frame_benchmarking::{ - account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelist_account, - whitelisted_caller, + account, benchmarks_instance_pallet, whitelist_account, whitelisted_caller, }; use frame_support::{ dispatch::UnfilteredDispatchable, @@ -438,6 +437,6 @@ benchmarks_instance_pallet! 
{ verify { assert_last_event::(Event::ApprovalCancelled(id, caller, delegate).into()); } -} -impl_benchmark_test_suite!(Assets, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Assets, crate::mock::new_test_ext(), crate::mock::Test) +} diff --git a/frame/assets/src/functions.rs b/frame/assets/src/functions.rs index 81b490eaf877c..a4685d88d0497 100644 --- a/frame/assets/src/functions.rs +++ b/frame/assets/src/functions.rs @@ -18,6 +18,7 @@ //! Functions for the Assets pallet. use super::*; +use frame_support::{traits::Get, BoundedVec}; // The main implementation block for the module. impl, I: 'static> Pallet { @@ -478,4 +479,212 @@ impl, I: 'static> Pallet { Self::deposit_event(Event::Transferred(id, source.clone(), dest.clone(), credit)); Ok(credit) } + + /// Create a new asset without taking a deposit. + /// + /// * `id`: The `AssetId` you want the new asset to have. Must not already be in use. + /// * `owner`: The owner, issuer, admin, and freezer of this asset upon creation. + /// * `is_sufficient`: Whether this asset needs users to have an existential deposit to hold + /// this asset. + /// * `min_balance`: The minimum balance a user is allowed to have of this asset before they are + /// considered dust and cleaned up. + pub(super) fn do_force_create( + id: T::AssetId, + owner: T::AccountId, + is_sufficient: bool, + min_balance: T::Balance, + ) -> DispatchResult { + ensure!(!Asset::::contains_key(id), Error::::InUse); + ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); + + Asset::::insert( + id, + AssetDetails { + owner: owner.clone(), + issuer: owner.clone(), + admin: owner.clone(), + freezer: owner.clone(), + supply: Zero::zero(), + deposit: Zero::zero(), + min_balance, + is_sufficient, + accounts: 0, + sufficients: 0, + approvals: 0, + is_frozen: false, + }, + ); + Self::deposit_event(Event::ForceCreated(id, owner)); + Ok(()) + } + + /// Destroy an existing asset. + /// + /// * `id`: The asset you want to destroy. 
+ /// * `witness`: Witness data needed about the current state of the asset, used to confirm + /// complexity of the operation. + /// * `maybe_check_owner`: An optional check before destroying the asset, if the provided + /// account is the owner of that asset. Can be used for authorization checks. + pub(super) fn do_destroy( + id: T::AssetId, + witness: DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Asset::::try_mutate_exists(id, |maybe_details| { + let mut details = maybe_details.take().ok_or(Error::::Unknown)?; + if let Some(check_owner) = maybe_check_owner { + ensure!(details.owner == check_owner, Error::::NoPermission); + } + ensure!(details.accounts <= witness.accounts, Error::::BadWitness); + ensure!(details.sufficients <= witness.sufficients, Error::::BadWitness); + ensure!(details.approvals <= witness.approvals, Error::::BadWitness); + + for (who, v) in Account::::drain_prefix(id) { + Self::dead_account(id, &who, &mut details, v.sufficient); + } + debug_assert_eq!(details.accounts, 0); + debug_assert_eq!(details.sufficients, 0); + + let metadata = Metadata::::take(&id); + T::Currency::unreserve( + &details.owner, + details.deposit.saturating_add(metadata.deposit), + ); + + for ((owner, _), approval) in Approvals::::drain_prefix((&id,)) { + T::Currency::unreserve(&owner, approval.deposit); + } + Self::deposit_event(Event::Destroyed(id)); + + Ok(DestroyWitness { + accounts: details.accounts, + sufficients: details.sufficients, + approvals: details.approvals, + }) + }) + } + + /// Creates an approval from `owner` to spend `amount` of asset `id` tokens by 'delegate' + /// while reserving `T::ApprovalDeposit` from owner + /// + /// If an approval already exists, the new amount is added to such existing approval + pub(super) fn do_approve_transfer( + id: T::AssetId, + owner: &T::AccountId, + delegate: &T::AccountId, + amount: T::Balance, + ) -> DispatchResult { + let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(!d.is_frozen, 
Error::::Frozen); + Approvals::::try_mutate( + (id, &owner, &delegate), + |maybe_approved| -> DispatchResult { + let mut approved = match maybe_approved.take() { + // an approval already exists and is being updated + Some(a) => a, + // a new approval is created + None => { + d.approvals.saturating_inc(); + Default::default() + }, + }; + let deposit_required = T::ApprovalDeposit::get(); + if approved.deposit < deposit_required { + T::Currency::reserve(&owner, deposit_required - approved.deposit)?; + approved.deposit = deposit_required; + } + approved.amount = approved.amount.saturating_add(amount); + *maybe_approved = Some(approved); + Ok(()) + }, + )?; + Asset::::insert(id, d); + Self::deposit_event(Event::ApprovedTransfer(id, owner.clone(), delegate.clone(), amount)); + + Ok(()) + } + + /// Reduces the asset `id` balance of `owner` by some `amount` and increases the balance of + /// `dest` by (similar) amount, checking that 'delegate' has an existing approval from `owner` + /// to spend`amount`. 
+ /// + /// Will fail if `amount` is greater than the approval from `owner` to 'delegate' + /// Will unreserve the deposit from `owner` if the entire approved `amount` is spent by + /// 'delegate' + pub(super) fn do_transfer_approved( + id: T::AssetId, + owner: &T::AccountId, + delegate: &T::AccountId, + destination: &T::AccountId, + amount: T::Balance, + ) -> DispatchResult { + Approvals::::try_mutate_exists( + (id, &owner, delegate), + |maybe_approved| -> DispatchResult { + let mut approved = maybe_approved.take().ok_or(Error::::Unapproved)?; + let remaining = + approved.amount.checked_sub(&amount).ok_or(Error::::Unapproved)?; + + let f = TransferFlags { keep_alive: false, best_effort: false, burn_dust: false }; + Self::do_transfer(id, &owner, &destination, amount, None, f)?; + + if remaining.is_zero() { + T::Currency::unreserve(&owner, approved.deposit); + Asset::::mutate(id, |maybe_details| { + if let Some(details) = maybe_details { + details.approvals.saturating_dec(); + } + }); + } else { + approved.amount = remaining; + *maybe_approved = Some(approved); + } + Ok(()) + }, + )?; + Ok(()) + } + + /// Do set metadata + pub(super) fn do_set_metadata( + id: T::AssetId, + from: &T::AccountId, + name: Vec, + symbol: Vec, + decimals: u8, + ) -> DispatchResult { + let bounded_name: BoundedVec = + name.clone().try_into().map_err(|_| Error::::BadMetadata)?; + let bounded_symbol: BoundedVec = + symbol.clone().try_into().map_err(|_| Error::::BadMetadata)?; + + let d = Asset::::get(id).ok_or(Error::::Unknown)?; + ensure!(from == &d.owner, Error::::NoPermission); + + Metadata::::try_mutate_exists(id, |metadata| { + ensure!(metadata.as_ref().map_or(true, |m| !m.is_frozen), Error::::NoPermission); + + let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); + let new_deposit = T::MetadataDepositPerByte::get() + .saturating_mul(((name.len() + symbol.len()) as u32).into()) + .saturating_add(T::MetadataDepositBase::get()); + + if new_deposit > old_deposit { + 
T::Currency::reserve(from, new_deposit - old_deposit)?; + } else { + T::Currency::unreserve(from, old_deposit - new_deposit); + } + + *metadata = Some(AssetMetadata { + deposit: new_deposit, + name: bounded_name, + symbol: bounded_symbol, + decimals, + is_frozen: false, + }); + + Self::deposit_event(Event::MetadataSet(id, name, symbol, decimals, false)); + Ok(()) + }) + } } diff --git a/frame/assets/src/impl_fungibles.rs b/frame/assets/src/impl_fungibles.rs index 4e85b20a1fbb1..be534bfad57ab 100644 --- a/frame/assets/src/impl_fungibles.rs +++ b/frame/assets/src/impl_fungibles.rs @@ -60,6 +60,25 @@ impl, I: 'static> fungibles::Inspect<::AccountId } } +impl, I: 'static> fungibles::InspectMetadata<::AccountId> + for Pallet +{ + /// Return the name of an asset. + fn name(asset: &Self::AssetId) -> Vec { + Metadata::::get(asset).name.to_vec() + } + + /// Return the symbol of an asset. + fn symbol(asset: &Self::AssetId) -> Vec { + Metadata::::get(asset).symbol.to_vec() + } + + /// Return the decimals of an asset. 
+ fn decimals(asset: &Self::AssetId) -> u8 { + Metadata::::get(asset).decimals + } +} + impl, I: 'static> fungibles::Mutate<::AccountId> for Pallet { fn mint_into( asset: Self::AssetId, @@ -147,3 +166,99 @@ impl, I: 'static> fungibles::Unbalanced for Pallet, I: 'static> fungibles::Create for Pallet { + fn create( + id: T::AssetId, + admin: T::AccountId, + is_sufficient: bool, + min_balance: Self::Balance, + ) -> DispatchResult { + Self::do_force_create(id, admin, is_sufficient, min_balance) + } +} + +impl, I: 'static> fungibles::Destroy for Pallet { + type DestroyWitness = DestroyWitness; + + fn get_destroy_witness(asset: &T::AssetId) -> Option { + Asset::::get(asset).map(|asset_details| asset_details.destroy_witness()) + } + + fn destroy( + id: T::AssetId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Self::do_destroy(id, witness, maybe_check_owner) + } +} + +impl, I: 'static> fungibles::metadata::Inspect<::AccountId> + for Pallet +{ + fn name(asset: T::AssetId) -> Vec { + Metadata::::get(asset).name.to_vec() + } + + fn symbol(asset: T::AssetId) -> Vec { + Metadata::::get(asset).symbol.to_vec() + } + + fn decimals(asset: T::AssetId) -> u8 { + Metadata::::get(asset).decimals + } +} + +impl, I: 'static> fungibles::metadata::Mutate<::AccountId> + for Pallet +{ + fn set( + asset: T::AssetId, + from: &::AccountId, + name: Vec, + symbol: Vec, + decimals: u8, + ) -> DispatchResult { + Self::do_set_metadata(asset, from, name, symbol, decimals) + } +} + +impl, I: 'static> fungibles::approvals::Inspect<::AccountId> + for Pallet +{ + // Check the amount approved to be spent by an owner to a delegate + fn allowance( + asset: T::AssetId, + owner: &::AccountId, + delegate: &::AccountId, + ) -> T::Balance { + Approvals::::get((asset, &owner, &delegate)) + .map(|x| x.amount) + .unwrap_or_else(Zero::zero) + } +} + +impl, I: 'static> fungibles::approvals::Mutate<::AccountId> + for Pallet +{ + fn approve( + asset: T::AssetId, + owner: &::AccountId, 
+ delegate: &::AccountId, + amount: T::Balance, + ) -> DispatchResult { + Self::do_approve_transfer(asset, owner, delegate, amount) + } + + // Aprove spending tokens from a given account + fn transfer_from( + asset: T::AssetId, + owner: &::AccountId, + delegate: &::AccountId, + dest: &::AccountId, + amount: T::Balance, + ) -> DispatchResult { + Self::do_transfer_approved(asset, owner, delegate, dest, amount) + } +} diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 797a3ae7ee9fb..4176242c8394a 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -143,6 +143,7 @@ use codec::HasCompact; use frame_support::{ dispatch::{DispatchError, DispatchResult}, ensure, + pallet_prelude::DispatchResultWithPostInfo, traits::{ tokens::{fungibles, DepositConsequence, WithdrawConsequence}, BalanceStatus::Reserved, @@ -158,6 +159,9 @@ use sp_runtime::{ }; use sp_std::{borrow::Borrow, convert::TryInto, prelude::*}; +#[cfg(feature = "std")] +use frame_support::traits::GenesisBuild; + pub use pallet::*; pub use weights::WeightInfo; @@ -166,6 +170,7 @@ pub mod pallet { use super::*; use frame_support::{dispatch::DispatchResult, pallet_prelude::*}; use frame_system::pallet_prelude::*; + use scale_info::TypeInfo; #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] @@ -179,10 +184,24 @@ pub mod pallet { type Event: From> + IsType<::Event>; /// The units in which we record balances. - type Balance: Member + Parameter + AtLeast32BitUnsigned + Default + Copy + MaxEncodedLen; + type Balance: Member + + Parameter + + AtLeast32BitUnsigned + + Default + + Copy + + MaybeSerializeDeserialize + + MaxEncodedLen + + TypeInfo; /// Identifier for the class of asset. - type AssetId: Member + Parameter + Default + Copy + HasCompact + MaxEncodedLen; + type AssetId: Member + + Parameter + + Default + + Copy + + HasCompact + + MaybeSerializeDeserialize + + MaxEncodedLen + + TypeInfo; /// The currency mechanism. 
type Currency: ReservableCurrency; @@ -275,6 +294,89 @@ pub mod pallet { ConstU32<300_000>, >; + #[pallet::genesis_config] + pub struct GenesisConfig, I: 'static = ()> { + /// Genesis assets: id, owner, is_sufficient, min_balance + pub assets: Vec<(T::AssetId, T::AccountId, bool, T::Balance)>, + /// Genesis metadata: id, name, symbol, decimals + pub metadata: Vec<(T::AssetId, Vec, Vec, u8)>, + /// Genesis accounts: id, account_id, balance + pub accounts: Vec<(T::AssetId, T::AccountId, T::Balance)>, + } + + #[cfg(feature = "std")] + impl, I: 'static> Default for GenesisConfig { + fn default() -> Self { + Self { + assets: Default::default(), + metadata: Default::default(), + accounts: Default::default(), + } + } + } + + #[pallet::genesis_build] + impl, I: 'static> GenesisBuild for GenesisConfig { + fn build(&self) { + for (id, owner, is_sufficient, min_balance) in &self.assets { + assert!(!Asset::::contains_key(id), "Asset id already in use"); + assert!(!min_balance.is_zero(), "Min balance should not be zero"); + Asset::::insert( + id, + AssetDetails { + owner: owner.clone(), + issuer: owner.clone(), + admin: owner.clone(), + freezer: owner.clone(), + supply: Zero::zero(), + deposit: Zero::zero(), + min_balance: *min_balance, + is_sufficient: *is_sufficient, + accounts: 0, + sufficients: 0, + approvals: 0, + is_frozen: false, + }, + ); + } + + for (id, name, symbol, decimals) in &self.metadata { + assert!(Asset::::contains_key(id), "Asset does not exist"); + + let bounded_name: BoundedVec = + name.clone().try_into().expect("asset name is too long"); + let bounded_symbol: BoundedVec = + symbol.clone().try_into().expect("asset symbol is too long"); + + let metadata = AssetMetadata { + deposit: Zero::zero(), + name: bounded_name, + symbol: bounded_symbol, + decimals: *decimals, + is_frozen: false, + }; + Metadata::::insert(id, metadata); + } + + for (id, account_id, amount) in &self.accounts { + let result = >::increase_balance( + *id, + account_id, + *amount, + 
|details| -> DispatchResult { + debug_assert!( + T::Balance::max_value() - details.supply >= *amount, + "checked in prep; qed" + ); + details.supply = details.supply.saturating_add(*amount); + Ok(()) + }, + ); + assert!(result.is_ok()); + } + } + } + #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event, I: 'static = ()> { @@ -437,29 +539,7 @@ pub mod pallet { ) -> DispatchResult { T::ForceOrigin::ensure_origin(origin)?; let owner = T::Lookup::lookup(owner)?; - - ensure!(!Asset::::contains_key(id), Error::::InUse); - ensure!(!min_balance.is_zero(), Error::::MinBalanceZero); - - Asset::::insert( - id, - AssetDetails { - owner: owner.clone(), - issuer: owner.clone(), - admin: owner.clone(), - freezer: owner.clone(), - supply: Zero::zero(), - deposit: Zero::zero(), - min_balance, - is_sufficient, - accounts: 0, - sufficients: 0, - approvals: 0, - is_frozen: false, - }, - ); - Self::deposit_event(Event::ForceCreated(id, owner)); - Ok(()) + Self::do_force_create(id, owner, is_sufficient, min_balance) } /// Destroy a class of fungible assets. 
@@ -494,39 +574,13 @@ pub mod pallet { Ok(_) => None, Err(origin) => Some(ensure_signed(origin)?), }; - Asset::::try_mutate_exists(id, |maybe_details| { - let mut details = maybe_details.take().ok_or(Error::::Unknown)?; - if let Some(check_owner) = maybe_check_owner { - ensure!(details.owner == check_owner, Error::::NoPermission); - } - ensure!(details.accounts <= witness.accounts, Error::::BadWitness); - ensure!(details.sufficients <= witness.sufficients, Error::::BadWitness); - ensure!(details.approvals <= witness.approvals, Error::::BadWitness); - - for (who, v) in Account::::drain_prefix(id) { - Self::dead_account(id, &who, &mut details, v.sufficient); - } - debug_assert_eq!(details.accounts, 0); - debug_assert_eq!(details.sufficients, 0); - - let metadata = Metadata::::take(&id); - T::Currency::unreserve( - &details.owner, - details.deposit.saturating_add(metadata.deposit), - ); - - for ((owner, _), approval) in Approvals::::drain_prefix((&id,)) { - T::Currency::unreserve(&owner, approval.deposit); - } - Self::deposit_event(Event::Destroyed(id)); - - Ok(Some(T::WeightInfo::destroy( - details.accounts.saturating_sub(details.sufficients), - details.sufficients, - details.approvals, - )) - .into()) - }) + let details = Self::do_destroy(id, witness, maybe_check_owner)?; + Ok(Some(T::WeightInfo::destroy( + details.accounts.saturating_sub(details.sufficients), + details.sufficients, + details.approvals, + )) + .into()) } /// Mint assets of a particular class. 
@@ -897,43 +951,7 @@ pub mod pallet { decimals: u8, ) -> DispatchResult { let origin = ensure_signed(origin)?; - - let bounded_name: BoundedVec = - name.clone().try_into().map_err(|_| Error::::BadMetadata)?; - let bounded_symbol: BoundedVec = - symbol.clone().try_into().map_err(|_| Error::::BadMetadata)?; - - let d = Asset::::get(id).ok_or(Error::::Unknown)?; - ensure!(&origin == &d.owner, Error::::NoPermission); - - Metadata::::try_mutate_exists(id, |metadata| { - ensure!( - metadata.as_ref().map_or(true, |m| !m.is_frozen), - Error::::NoPermission - ); - - let old_deposit = metadata.take().map_or(Zero::zero(), |m| m.deposit); - let new_deposit = T::MetadataDepositPerByte::get() - .saturating_mul(((name.len() + symbol.len()) as u32).into()) - .saturating_add(T::MetadataDepositBase::get()); - - if new_deposit > old_deposit { - T::Currency::reserve(&origin, new_deposit - old_deposit)?; - } else { - T::Currency::unreserve(&origin, old_deposit - new_deposit); - } - - *metadata = Some(AssetMetadata { - deposit: new_deposit, - name: bounded_name, - symbol: bounded_symbol, - decimals, - is_frozen: false, - }); - - Self::deposit_event(Event::MetadataSet(id, name, symbol, decimals, false)); - Ok(()) - }) + Self::do_set_metadata(id, &origin, name, symbol, decimals) } /// Clear the metadata for an asset. 
@@ -1120,35 +1138,7 @@ pub mod pallet { ) -> DispatchResult { let owner = ensure_signed(origin)?; let delegate = T::Lookup::lookup(delegate)?; - - let mut d = Asset::::get(id).ok_or(Error::::Unknown)?; - ensure!(!d.is_frozen, Error::::Frozen); - Approvals::::try_mutate( - (id, &owner, &delegate), - |maybe_approved| -> DispatchResult { - let mut approved = match maybe_approved.take() { - // an approval already exists and is being updated - Some(a) => a, - // a new approval is created - None => { - d.approvals.saturating_inc(); - Default::default() - }, - }; - let deposit_required = T::ApprovalDeposit::get(); - if approved.deposit < deposit_required { - T::Currency::reserve(&owner, deposit_required - approved.deposit)?; - approved.deposit = deposit_required; - } - approved.amount = approved.amount.saturating_add(amount); - *maybe_approved = Some(approved); - Ok(()) - }, - )?; - Asset::::insert(id, d); - Self::deposit_event(Event::ApprovedTransfer(id, owner, delegate, amount)); - - Ok(()) + Self::do_approve_transfer(id, &owner, &delegate, amount) } /// Cancel all of some asset approved for delegated transfer by a third-party account. 
@@ -1255,33 +1245,7 @@ pub mod pallet { let delegate = ensure_signed(origin)?; let owner = T::Lookup::lookup(owner)?; let destination = T::Lookup::lookup(destination)?; - - Approvals::::try_mutate_exists( - (id, &owner, delegate), - |maybe_approved| -> DispatchResult { - let mut approved = maybe_approved.take().ok_or(Error::::Unapproved)?; - let remaining = - approved.amount.checked_sub(&amount).ok_or(Error::::Unapproved)?; - - let f = - TransferFlags { keep_alive: false, best_effort: false, burn_dust: false }; - Self::do_transfer(id, &owner, &destination, amount, None, f)?; - - if remaining.is_zero() { - T::Currency::unreserve(&owner, approved.deposit); - Asset::::mutate(id, |maybe_details| { - if let Some(details) = maybe_details { - details.approvals.saturating_dec(); - } - }); - } else { - approved.amount = remaining; - *maybe_approved = Some(approved); - } - Ok(()) - }, - )?; - Ok(()) + Self::do_transfer_approved(id, &owner, &delegate, &destination, amount) } } } diff --git a/frame/assets/src/mock.rs b/frame/assets/src/mock.rs index 1b2602792d844..1e1ea8ba9a961 100644 --- a/frame/assets/src/mock.rs +++ b/frame/assets/src/mock.rs @@ -144,9 +144,26 @@ pub(crate) fn hooks() -> Vec { } pub(crate) fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - - let mut ext = sp_io::TestExternalities::new(t); + let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let config: pallet_assets::GenesisConfig = pallet_assets::GenesisConfig { + assets: vec![ + // id, owner, is_sufficient, min_balance + (999, 0, true, 1), + ], + metadata: vec![ + // id, name, symbol, decimals + (999, "Token Name".into(), "TOKEN".into(), 10), + ], + accounts: vec![ + // id, account_id, balance + (999, 1, 100), + ], + }; + + config.assimilate_storage(&mut storage).unwrap(); + + let mut ext: sp_io::TestExternalities = storage.into(); ext.execute_with(|| System::set_block_number(1)); ext } diff 
--git a/frame/assets/src/tests.rs b/frame/assets/src/tests.rs index aab534a6e4efc..5250fafaa8f9a 100644 --- a/frame/assets/src/tests.rs +++ b/frame/assets/src/tests.rs @@ -784,3 +784,47 @@ fn balance_conversion_should_work() { ); }); } + +#[test] +fn assets_from_genesis_should_exist() { + new_test_ext().execute_with(|| { + assert!(Asset::::contains_key(999)); + assert!(Metadata::::contains_key(999)); + assert_eq!(Assets::balance(999, 1), 100); + assert_eq!(Assets::total_supply(999), 100); + }); +} + +#[test] +fn querying_name_symbol_and_decimals_should_work() { + new_test_ext().execute_with(|| { + use frame_support::traits::tokens::fungibles::metadata::Inspect; + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::force_set_metadata( + Origin::root(), + 0, + vec![0u8; 10], + vec![1u8; 10], + 12, + false + )); + assert_eq!(Assets::name(0), vec![0u8; 10]); + assert_eq!(Assets::symbol(0), vec![1u8; 10]); + assert_eq!(Assets::decimals(0), 12); + }); +} + +#[test] +fn querying_allowance_should_work() { + new_test_ext().execute_with(|| { + use frame_support::traits::tokens::fungibles::approvals::{Inspect, Mutate}; + assert_ok!(Assets::force_create(Origin::root(), 0, 1, true, 1)); + assert_ok!(Assets::mint(Origin::signed(1), 0, 1, 100)); + Balances::make_free_balance_be(&1, 1); + assert_ok!(Assets::approve(0, &1, &2, 50)); + assert_eq!(Assets::allowance(0, &1, &2), 50); + // Transfer asset 0, from owner 1 and delegate 2 to destination 3 + assert_ok!(Assets::transfer_from(0, &1, &2, &3, 50)); + assert_eq!(Assets::allowance(0, &1, &2), 0); + }); +} diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index 53a8c3a81165b..60e8fa613f06b 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = 
"https://github.com/paritytech/substrate/" description = "FRAME atomic swap pallet" readme = "README.md" diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index 8f5c42bc3c465..1761f78edeca2 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME AURA consensus pallet" readme = "README.md" diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index e8b68f928e087..4b5294835403a 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -221,7 +221,7 @@ impl OneSessionHandler for Pallet { } } - fn on_disabled(i: usize) { + fn on_disabled(i: u32) { let log: DigestItem = DigestItem::Consensus( AURA_ENGINE_ID, ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(), diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index 80a320c31e77f..7e64509ce6b45 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for authority discovery" readme = "README.md" diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index d093b1533c693..8fced0d18cff1 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -166,7 +166,7 @@ impl OneSessionHandler for Pallet { } } - fn on_disabled(_i: usize) { + fn on_disabled(_i: u32) { // ignore } } @@ -218,7 +218,6 @@ mod tests { type Event = Event; type ValidatorId = AuthorityId; type ValidatorIdOf = ConvertInto; - type DisabledValidatorsThreshold = 
DisabledValidatorsThreshold; type NextSessionRotation = pallet_session::PeriodicSessions; type WeightInfo = (); } @@ -276,7 +275,7 @@ mod tests { ) { } - fn on_disabled(_validator_index: usize) {} + fn on_disabled(_validator_index: u32) {} fn on_genesis_session(_validators: &[(AuthorityId, Ks)]) {} } diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index 120b72f8e6511..6ac91970712d7 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -5,7 +5,7 @@ description = "Block and Uncle Author tracking for the FRAME" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index d95f1419fd035..6b0dc71b5e29a 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Consensus extension module for BABE consensus. Collects on-chain randomness from VRF outputs and manages epoch transitions." readme = "README.md" diff --git a/frame/babe/src/benchmarking.rs b/frame/babe/src/benchmarking.rs index 372dfa532a894..7747c9bd1fc8c 100644 --- a/frame/babe/src/benchmarking.rs +++ b/frame/babe/src/benchmarking.rs @@ -63,6 +63,12 @@ benchmarks! { } verify { assert!(sp_consensus_babe::check_equivocation_proof::

(equivocation_proof2)); } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(3), + crate::mock::Test, + ) } #[cfg(test)] @@ -70,12 +76,6 @@ mod tests { use super::*; use crate::mock::*; - frame_benchmarking::impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(3), - crate::mock::Test, - ); - #[test] fn test_generate_equivocation_report_blob() { let (pairs, mut ext) = new_test_ext_with_pairs(3); diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index b39074bb3f057..9c755eea6c446 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -25,11 +25,13 @@ use codec::{Decode, Encode}; use frame_support::{ dispatch::DispatchResultWithPostInfo, traits::{ - DisabledValidators, FindAuthor, Get, KeyOwnerProofSystem, OnTimestampSet, OneSessionHandler, + ConstU32, DisabledValidators, FindAuthor, Get, KeyOwnerProofSystem, OnTimestampSet, + OneSessionHandler, }, weights::{Pays, Weight}, + BoundedVec, WeakBoundedVec, }; -use sp_application_crypto::Public; +use sp_application_crypto::{Public, TryFrom}; use sp_runtime::{ generic::DigestItem, traits::{IsMember, One, SaturatedConversion, Saturating, Zero}, @@ -100,7 +102,7 @@ impl EpochChangeTrigger for SameAuthoritiesForever { } } -const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; +const UNDER_CONSTRUCTION_SEGMENT_LENGTH: u32 = 256; type MaybeRandomness = Option; @@ -113,6 +115,7 @@ pub mod pallet { /// The BABE Pallet #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] + #[pallet::generate_storage_info] pub struct Pallet(_); #[pallet::config] @@ -169,6 +172,10 @@ pub mod pallet { type HandleEquivocation: HandleEquivocation; type WeightInfo: WeightInfo; + + /// Max number of authorities allowed + #[pallet::constant] + type MaxAuthorities: Get; } #[pallet::error] @@ -189,7 +196,11 @@ pub mod pallet { /// Current epoch authorities. 
#[pallet::storage] #[pallet::getter(fn authorities)] - pub type Authorities = StorageValue<_, Vec<(AuthorityId, BabeAuthorityWeight)>, ValueQuery>; + pub type Authorities = StorageValue< + _, + WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, + ValueQuery, + >; /// The slot at which the first epoch actually started. This is 0 /// until the first block of the chain. @@ -229,8 +240,11 @@ pub mod pallet { /// Next epoch authorities. #[pallet::storage] - pub(super) type NextAuthorities = - StorageValue<_, Vec<(AuthorityId, BabeAuthorityWeight)>, ValueQuery>; + pub(super) type NextAuthorities = StorageValue< + _, + WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, + ValueQuery, + >; /// Randomness under construction. /// @@ -246,8 +260,13 @@ pub mod pallet { /// TWOX-NOTE: `SegmentIndex` is an increasing integer, so this is okay. #[pallet::storage] - pub(super) type UnderConstruction = - StorageMap<_, Twox64Concat, u32, Vec, ValueQuery>; + pub(super) type UnderConstruction = StorageMap< + _, + Twox64Concat, + u32, + BoundedVec>, + ValueQuery, + >; /// Temporary value (cleared at block finalization) which is `Some` /// if per-block initialization has already been called for current block. @@ -503,8 +522,8 @@ impl Pallet { /// Typically, this is not handled directly by the user, but by higher-level validator-set /// manager logic like `pallet-session`. pub fn enact_epoch_change( - authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - next_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + authorities: WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, + next_authorities: WeakBoundedVec<(AuthorityId, BabeAuthorityWeight), T::MaxAuthorities>, ) { // PRECONDITION: caller has done initialization and is guaranteed // by the session module to be called before this. @@ -541,8 +560,10 @@ impl Pallet { // so that nodes can track changes. 
let next_randomness = NextRandomness::::get(); - let next_epoch = - NextEpochDescriptor { authorities: next_authorities, randomness: next_randomness }; + let next_epoch = NextEpochDescriptor { + authorities: next_authorities.to_vec(), + randomness: next_randomness, + }; Self::deposit_consensus(ConsensusLog::NextEpochData(next_epoch)); if let Some(next_config) = NextEpochConfig::::get() { @@ -571,7 +592,7 @@ impl Pallet { epoch_index: EpochIndex::::get(), start_slot: Self::current_epoch_start(), duration: T::EpochDuration::get(), - authorities: Self::authorities(), + authorities: Self::authorities().to_vec(), randomness: Self::randomness(), config: EpochConfig::::get() .expect("EpochConfig is initialized in genesis; we never `take` or `kill` it; qed"), @@ -590,7 +611,7 @@ impl Pallet { epoch_index: next_epoch_index, start_slot: Self::epoch_start(next_epoch_index), duration: T::EpochDuration::get(), - authorities: NextAuthorities::::get(), + authorities: NextAuthorities::::get().to_vec(), randomness: NextRandomness::::get(), config: NextEpochConfig::::get().unwrap_or_else(|| { EpochConfig::::get().expect( @@ -619,14 +640,18 @@ impl Pallet { fn deposit_randomness(randomness: &schnorrkel::Randomness) { let segment_idx = SegmentIndex::::get(); let mut segment = UnderConstruction::::get(&segment_idx); - if segment.len() < UNDER_CONSTRUCTION_SEGMENT_LENGTH { + if segment.try_push(*randomness).is_ok() { // push onto current segment: not full. - segment.push(*randomness); UnderConstruction::::insert(&segment_idx, &segment); } else { // move onto the next segment and update the index. 
let segment_idx = segment_idx + 1; - UnderConstruction::::insert(&segment_idx, &vec![randomness.clone()]); + let bounded_randomness = + BoundedVec::<_, ConstU32>::try_from(vec![ + randomness.clone(), + ]) + .expect("UNDER_CONSTRUCTION_SEGMENT_LENGTH >= 1"); + UnderConstruction::::insert(&segment_idx, bounded_randomness); SegmentIndex::::put(&segment_idx); } } @@ -667,7 +692,7 @@ impl Pallet { // we use the same values as genesis because we haven't collected any // randomness yet. let next = NextEpochDescriptor { - authorities: Self::authorities(), + authorities: Self::authorities().to_vec(), randomness: Self::randomness(), }; @@ -732,7 +757,7 @@ impl Pallet { let segment_idx: u32 = SegmentIndex::::mutate(|s| sp_std::mem::replace(s, 0)); // overestimate to the segment being full. - let rho_size = segment_idx.saturating_add(1) as usize * UNDER_CONSTRUCTION_SEGMENT_LENGTH; + let rho_size = (segment_idx.saturating_add(1) * UNDER_CONSTRUCTION_SEGMENT_LENGTH) as usize; let next_randomness = compute_randomness( this_randomness, @@ -747,8 +772,11 @@ impl Pallet { fn initialize_authorities(authorities: &[(AuthorityId, BabeAuthorityWeight)]) { if !authorities.is_empty() { assert!(Authorities::::get().is_empty(), "Authorities are already initialized!"); - Authorities::::put(authorities); - NextAuthorities::::put(authorities); + let bounded_authorities = + WeakBoundedVec::<_, T::MaxAuthorities>::try_from(authorities.to_vec()) + .expect("Initial number of authorities should be lower than T::MaxAuthorities"); + Authorities::::put(&bounded_authorities); + NextAuthorities::::put(&bounded_authorities); } } @@ -878,14 +906,28 @@ impl OneSessionHandler for Pallet { I: Iterator, { let authorities = validators.map(|(_account, k)| (k, 1)).collect::>(); + let bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + authorities, + Some( + "Warning: The session has more validators than expected. 
\ + A runtime configuration adjustment may be needed.", + ), + ); let next_authorities = queued_validators.map(|(_account, k)| (k, 1)).collect::>(); + let next_bounded_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + next_authorities, + Some( + "Warning: The session has more queued validators than expected. \ + A runtime configuration adjustment may be needed.", + ), + ); - Self::enact_epoch_change(authorities, next_authorities) + Self::enact_epoch_change(bounded_authorities, next_bounded_authorities) } - fn on_disabled(i: usize) { - Self::deposit_consensus(ConsensusLog::OnDisabled(i as u32)) + fn on_disabled(i: u32) { + Self::deposit_consensus(ConsensusLog::OnDisabled(i)) } } diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 833a68fbddb6c..e7ec692689032 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -68,7 +68,6 @@ frame_support::construct_runtime!( parameter_types! { pub const BlockHashCount: u64 = 250; - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(16); pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights::simple_max(1024); } @@ -122,7 +121,6 @@ impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = MockSessionKeys; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type WeightInfo = (); } @@ -189,6 +187,7 @@ parameter_types! 
{ pub const MaxNominatorRewardedPerValidator: u32 = 64; pub const ElectionLookahead: u64 = 0; pub const StakingUnsignedPriority: u64 = u64::MAX / 2; + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(16); } impl onchain::Config for Test { @@ -212,6 +211,7 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainSequentialPhragmen; type GenesisElectionProvider = Self::ElectionProvider; @@ -230,6 +230,7 @@ parameter_types! { pub const ExpectedBlockTime: u64 = 1; pub const ReportLongevity: u64 = BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * EpochDuration::get(); + pub const MaxAuthorities: u32 = 10; } impl Config for Test { @@ -252,6 +253,7 @@ impl Config for Test { super::EquivocationHandler; type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; } pub fn go_to_block(n: u64, s: u64) { diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index dc2f74c719519..34d861d5d97f7 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -92,7 +92,7 @@ fn first_block_epoch_zero_start() { let consensus_log = sp_consensus_babe::ConsensusLog::NextEpochData( sp_consensus_babe::digests::NextEpochDescriptor { - authorities: Babe::authorities(), + authorities: Babe::authorities().to_vec(), randomness: Babe::randomness(), }, ); diff --git a/frame/bags-list/Cargo.toml b/frame/bags-list/Cargo.toml index 860a6edc42143..cd06ce4a69983 100644 --- a/frame/bags-list/Cargo.toml +++ b/frame/bags-list/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = 
"https://github.com/paritytech/substrate/" description = "FRAME pallet bags list" readme = "README.md" diff --git a/frame/bags-list/src/benchmarks.rs b/frame/bags-list/src/benchmarks.rs index a820eeba13b12..d86adc674c44a 100644 --- a/frame/bags-list/src/benchmarks.rs +++ b/frame/bags-list/src/benchmarks.rs @@ -134,11 +134,10 @@ frame_benchmarking::benchmarks! { ] ); } -} -use frame_benchmarking::impl_benchmark_test_suite; -impl_benchmark_test_suite!( - Pallet, - crate::mock::ExtBuilder::default().build(), - crate::mock::Runtime, -); + impl_benchmark_test_suite!( + Pallet, + crate::mock::ExtBuilder::default().build(), + crate::mock::Runtime, + ) +} diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index 2263387d6d8ef..f6e6e97850a72 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet to manage balances" readme = "README.md" diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 06d202ea37002..1c48820094187 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -21,9 +21,7 @@ use super::*; -use frame_benchmarking::{ - account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelisted_caller, -}; +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelisted_caller}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -215,10 +213,10 @@ benchmarks_instance_pallet! 
{ assert!(Balances::::reserved_balance(&user).is_zero()); assert_eq!(Balances::::free_balance(&user), balance); } -} -impl_benchmark_test_suite!( - Balances, - crate::tests_composite::ExtBuilder::default().build(), - crate::tests_composite::Test, -); + impl_benchmark_test_suite!( + Balances, + crate::tests_composite::ExtBuilder::default().build(), + crate::tests_composite::Test, + ) +} diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index afd2331c8e3cf..da8019583c3be 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -463,8 +463,6 @@ pub mod pallet { Transfer(T::AccountId, T::AccountId, T::Balance), /// A balance was set by root. \[who, free, reserved\] BalanceSet(T::AccountId, T::Balance, T::Balance), - /// Some amount was deposited (e.g. for transaction fees). \[who, deposit\] - Deposit(T::AccountId, T::Balance), /// Some balance was reserved (moved from free to reserved). \[who, value\] Reserved(T::AccountId, T::Balance), /// Some balance was unreserved (moved from reserved to free). \[who, value\] @@ -473,6 +471,14 @@ pub mod pallet { /// Final argument indicates the destination balance type. /// \[from, to, balance, destination_status\] ReserveRepatriated(T::AccountId, T::AccountId, T::Balance, Status), + /// Some amount was deposited into the account (e.g. for transaction fees). \[who, + /// deposit\] + Deposit(T::AccountId, T::Balance), + /// Some amount was withdrawn from the account (e.g. for transaction fees). \[who, value\] + Withdraw(T::AccountId, T::Balance), + /// Some amount was removed from the account (e.g. for misbehavior). \[who, + /// amount_slashed\] + Slashed(T::AccountId, T::Balance), } /// Old name generated by `decl_event`. 
@@ -1103,6 +1109,7 @@ impl, I: 'static> fungible::Mutate for Pallet { Ok(()) })?; TotalIssuance::::mutate(|t| *t += amount); + Self::deposit_event(Event::Deposit(who.clone(), amount)); Ok(()) } @@ -1123,6 +1130,7 @@ impl, I: 'static> fungible::Mutate for Pallet { }, )?; TotalIssuance::::mutate(|t| *t -= actual); + Self::deposit_event(Event::Withdraw(who.clone(), amount)); Ok(actual) } } @@ -1141,7 +1149,10 @@ impl, I: 'static> fungible::Transfer for Pallet impl, I: 'static> fungible::Unbalanced for Pallet { fn set_balance(who: &T::AccountId, amount: Self::Balance) -> DispatchResult { - Self::mutate_account(who, |account| account.free = amount)?; + Self::mutate_account(who, |account| { + account.free = amount; + Self::deposit_event(Event::BalanceSet(who.clone(), account.free, account.reserved)); + })?; Ok(()) } @@ -1583,7 +1594,13 @@ where } }, ) { - Ok(r) => return r, + Ok((imbalance, not_slashed)) => { + Self::deposit_event(Event::Slashed( + who.clone(), + value.saturating_sub(not_slashed), + )); + return (imbalance, not_slashed) + }, Err(_) => (), } } @@ -1608,6 +1625,7 @@ where |account, is_new| -> Result { ensure!(!is_new, Error::::DeadAccount); account.free = account.free.checked_add(&value).ok_or(ArithmeticError::Overflow)?; + Self::deposit_event(Event::Deposit(who.clone(), value)); Ok(PositiveImbalance::new(value)) }, ) @@ -1640,6 +1658,7 @@ where None => return Ok(Self::PositiveImbalance::zero()), }; + Self::deposit_event(Event::Deposit(who.clone(), value)); Ok(PositiveImbalance::new(value)) }, ) @@ -1677,6 +1696,7 @@ where account.free = new_free_account; + Self::deposit_event(Event::Withdraw(who.clone(), value)); Ok(NegativeImbalance::new(value)) }, ) @@ -1709,6 +1729,7 @@ where SignedImbalance::Negative(NegativeImbalance::new(account.free - value)) }; account.free = value; + Self::deposit_event(Event::BalanceSet(who.clone(), account.free, account.reserved)); Ok(imbalance) }, ) @@ -1824,7 +1845,13 @@ where // underflow should never happen, but it if does, 
there's nothing to be done here. (NegativeImbalance::new(actual), value - actual) }) { - Ok(r) => return r, + Ok((imbalance, not_slashed)) => { + Self::deposit_event(Event::Slashed( + who.clone(), + value.saturating_sub(not_slashed), + )); + return (imbalance, not_slashed) + }, Err(_) => (), } } @@ -1965,6 +1992,7 @@ where // `actual <= to_change` and `to_change <= amount`; qed; reserves[index].amount -= actual; + Self::deposit_event(Event::Slashed(who.clone(), actual)); (imb, value - actual) }, Err(_) => (NegativeImbalance::zero(), value), diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index a08643821eba8..6a6ebc692c34a 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -314,6 +314,7 @@ macro_rules! decl_tests { <$ext_builder>::default().monied(true).build().execute_with(|| { assert_eq!(Balances::total_balance(&1), 10); assert_ok!(Balances::deposit_into_existing(&1, 10).map(drop)); + System::assert_last_event(Event::Balances(crate::Event::Deposit(1, 10))); assert_eq!(Balances::total_balance(&1), 20); assert_eq!(>::get(), 120); }); @@ -341,6 +342,7 @@ macro_rules! decl_tests { fn balance_works() { <$ext_builder>::default().build().execute_with(|| { let _ = Balances::deposit_creating(&1, 42); + System::assert_has_event(Event::Balances(crate::Event::Deposit(1, 42))); assert_eq!(Balances::free_balance(1), 42); assert_eq!(Balances::reserved_balance(1), 0); assert_eq!(Balances::total_balance(&1), 42); @@ -435,6 +437,19 @@ macro_rules! 
decl_tests { }); } + #[test] + fn withdrawing_balance_should_work() { + <$ext_builder>::default().build().execute_with(|| { + let _ = Balances::deposit_creating(&2, 111); + let _ = Balances::withdraw( + &2, 11, WithdrawReasons::TRANSFER, ExistenceRequirement::KeepAlive + ); + System::assert_last_event(Event::Balances(crate::Event::Withdraw(2, 11))); + assert_eq!(Balances::free_balance(2), 100); + assert_eq!(>::get(), 100); + }); + } + #[test] fn slashing_incomplete_balance_should_work() { <$ext_builder>::default().build().execute_with(|| { @@ -749,6 +764,7 @@ macro_rules! decl_tests { [ Event::System(system::Event::KilledAccount(1)), Event::Balances(crate::Event::DustLost(1, 99)), + Event::Balances(crate::Event::Slashed(1, 1)), ] ); }); @@ -777,7 +793,8 @@ macro_rules! decl_tests { assert_eq!( events(), [ - Event::System(system::Event::KilledAccount(1)) + Event::System(system::Event::KilledAccount(1)), + Event::Balances(crate::Event::Slashed(1, 100)), ] ); }); @@ -797,6 +814,7 @@ macro_rules! decl_tests { assert_eq!(Balances::slash(&1, 900), (NegativeImbalance::new(900), 0)); // Account is still alive assert!(System::account_exists(&1)); + System::assert_last_event(Event::Balances(crate::Event::Slashed(1, 900))); // SCENARIO: Slash will kill account because not enough balance left. assert_ok!(Balances::set_balance(Origin::root(), 1, 1_000, 0)); diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index f6faebed39316..60feedb326d8a 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -76,10 +76,12 @@ impl frame_system::Config for Test { } parameter_types! 
{ pub const TransactionByteFee: u64 = 1; + pub const OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Test { type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index d8c07aa9c42e5..b2113a916caa5 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -78,10 +78,12 @@ impl frame_system::Config for Test { } parameter_types! { pub const TransactionByteFee: u64 = 1; + pub const OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Test { type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } @@ -171,7 +173,7 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!(res, (NegativeImbalance::new(98), 0)); // no events - assert_eq!(events(), []); + assert_eq!(events(), [Event::Balances(crate::Event::Slashed(1, 98))]); let res = Balances::slash(&1, 1); assert_eq!(res, (NegativeImbalance::new(1), 0)); @@ -181,6 +183,7 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { [ Event::System(system::Event::KilledAccount(1)), Event::Balances(crate::Event::DustLost(1, 1)), + Event::Balances(crate::Event::Slashed(1, 1)), ] ); }); diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs index 9c7ba3e1ec824..9a5ebb003af2c 100644 --- a/frame/balances/src/tests_reentrancy.rs +++ b/frame/balances/src/tests_reentrancy.rs @@ -80,10 +80,12 @@ impl frame_system::Config for Test { } parameter_types! 
{ pub const TransactionByteFee: u64 = 1; + pub const OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Test { type OnChargeTransaction = CurrencyAdapter, ()>; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } @@ -165,11 +167,11 @@ fn transfer_dust_removal_tst1_should_work() { assert_eq!(Balances::free_balance(&1), 1050); // Verify the events - // Number of events expected is 8 - assert_eq!(System::events().len(), 11); + assert_eq!(System::events().len(), 12); System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 3, 450))); System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); + System::assert_has_event(Event::Balances(crate::Event::Deposit(1, 50))); }); } @@ -193,11 +195,11 @@ fn transfer_dust_removal_tst2_should_work() { assert_eq!(Balances::free_balance(&1), 1500); // Verify the events - // Number of events expected is 8 - assert_eq!(System::events().len(), 9); + assert_eq!(System::events().len(), 10); System::assert_has_event(Event::Balances(crate::Event::Transfer(2, 1, 450))); System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); + System::assert_has_event(Event::Balances(crate::Event::Deposit(1, 50))); }); } @@ -230,8 +232,7 @@ fn repatriating_reserved_balance_dust_removal_should_work() { assert_eq!(Balances::free_balance(1), 1500); // Verify the events - // Number of events expected is 10 - assert_eq!(System::events().len(), 10); + assert_eq!(System::events().len(), 11); System::assert_has_event(Event::Balances(crate::Event::ReserveRepatriated( 2, @@ -239,7 +240,7 @@ fn repatriating_reserved_balance_dust_removal_should_work() { 450, Status::Free, ))); - - System::assert_last_event(Event::Balances(crate::Event::DustLost(2, 50))); + System::assert_has_event(Event::Balances(crate::Event::DustLost(2, 50))); + 
System::assert_last_event(Event::Balances(crate::Event::Deposit(1, 50))); }); } diff --git a/frame/beefy-mmr/Cargo.toml b/frame/beefy-mmr/Cargo.toml new file mode 100644 index 0000000000000..3d4a9a72ddf86 --- /dev/null +++ b/frame/beefy-mmr/Cargo.toml @@ -0,0 +1,56 @@ +[package] +name = "pallet-beefy-mmr" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +description = "BEEFY + MMR runtime utilities" + +[dependencies] +hex = { version = "0.4", optional = true } +codec = { version = "2.2.0", package = "parity-scale-codec", default-features = false, features = ["derive"] } +libsecp256k1 = { version = "0.7.0", default-features = false } +log = { version = "0.4.13", default-features = false } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.130", optional = true } + +frame-support = { version = "4.0.0-dev", path = "../support", default-features = false } +frame-system = { version = "4.0.0-dev", path = "../system", default-features = false } +pallet-mmr = { version = "4.0.0-dev", path = "../merkle-mountain-range", default-features = false } +pallet-mmr-primitives = { version = "4.0.0-dev", path = "../merkle-mountain-range/primitives", default-features = false } +pallet-session = { version = "4.0.0-dev", path = "../session", default-features = false } + +sp-core = { version = "4.0.0-dev", path = "../../primitives/core", default-features = false } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io", default-features = false } +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime", default-features = false } +sp-std = { version = "4.0.0-dev", path = "../../primitives/std", default-features = false } + +beefy-merkle-tree = { version = "4.0.0-dev", path = "./primitives", default-features = false } +beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy", default-features = false } +pallet-beefy = { version = 
"4.0.0-dev", path = "../beefy", default-features = false } + +[dev-dependencies] +sp-staking = { version = "4.0.0-dev", path = "../../primitives/staking" } +hex-literal = "0.3" + +[features] +default = ["std"] +std = [ + "beefy-merkle-tree/std", + "beefy-primitives/std", + "codec/std", + "frame-support/std", + "frame-system/std", + "hex", + "libsecp256k1/std", + "log/std", + "pallet-beefy/std", + "pallet-mmr-primitives/std", + "pallet-mmr/std", + "pallet-session/std", + "serde", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/frame/beefy-mmr/primitives/Cargo.toml b/frame/beefy-mmr/primitives/Cargo.toml new file mode 100644 index 0000000000000..d5dcc0eed3350 --- /dev/null +++ b/frame/beefy-mmr/primitives/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "beefy-merkle-tree" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +description = "A no-std/Substrate compatible library to construct binary merkle tree." + +[dependencies] +hex = { version = "0.4", optional = true, default-features = false } +log = { version = "0.4", optional = true, default-features = false } +tiny-keccak = { version = "2.0.2", features = ["keccak"], optional = true } + +[dev-dependencies] +env_logger = "0.9" +hex = "0.4" +hex-literal = "0.3" + +[features] +debug = ["hex", "log"] +default = ["std", "debug", "keccak"] +keccak = ["tiny-keccak"] +std = [] diff --git a/frame/beefy-mmr/primitives/src/lib.rs b/frame/beefy-mmr/primitives/src/lib.rs new file mode 100644 index 0000000000000..4d4d4e8721ac8 --- /dev/null +++ b/frame/beefy-mmr/primitives/src/lib.rs @@ -0,0 +1,806 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +//! This crate implements a simple binary Merkle Tree utilities required for inter-op with Ethereum +//! bridge & Solidity contract. +//! +//! The implementation is optimised for usage within Substrate Runtime and supports no-std +//! compilation targets. +//! +//! Merkle Tree is constructed from arbitrary-length leaves, that are initially hashed using the +//! same [Hasher] as the inner nodes. +//! Inner nodes are created by concatenating child hashes and hashing again. The implementation +//! does not perform any sorting of the input data (leaves) nor when inner nodes are created. +//! +//! If the number of leaves is not even, last leave (hash of) is promoted to the upper layer. + +#[cfg(not(feature = "std"))] +extern crate alloc; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + +/// Supported hashing output size. +/// +/// The size is restricted to 32 bytes to allow for a more optimised implementation. +pub type Hash = [u8; 32]; + +/// Generic hasher trait. +/// +/// Implement the function to support custom way of hashing data. +/// The implementation must return a [Hash] type, so only 32-byte output hashes are supported. +pub trait Hasher { + /// Hash given arbitrary-length piece of data. + fn hash(data: &[u8]) -> Hash; +} + +#[cfg(feature = "keccak")] +mod keccak256 { + use tiny_keccak::{Hasher as _, Keccak}; + + /// Keccak256 hasher implementation. + pub struct Keccak256; + impl Keccak256 { + /// Hash given data. 
+ pub fn hash(data: &[u8]) -> super::Hash { + ::hash(data) + } + } + impl super::Hasher for Keccak256 { + fn hash(data: &[u8]) -> super::Hash { + let mut keccak = Keccak::v256(); + keccak.update(data); + let mut output = [0_u8; 32]; + keccak.finalize(&mut output); + output + } + } +} +#[cfg(feature = "keccak")] +pub use keccak256::Keccak256; + +/// Construct a root hash of a Binary Merkle Tree created from given leaves. +/// +/// See crate-level docs for details about Merkle Tree construction. +/// +/// In case an empty list of leaves is passed the function returns a 0-filled hash. +pub fn merkle_root(leaves: I) -> Hash +where + H: Hasher, + I: IntoIterator, + T: AsRef<[u8]>, +{ + let iter = leaves.into_iter().map(|l| H::hash(l.as_ref())); + merkelize::(iter, &mut ()) +} + +fn merkelize(leaves: I, visitor: &mut V) -> Hash +where + H: Hasher, + V: Visitor, + I: Iterator, +{ + let upper = Vec::with_capacity(leaves.size_hint().0); + let mut next = match merkelize_row::(leaves, upper, visitor) { + Ok(root) => return root, + Err(next) if next.is_empty() => return Hash::default(), + Err(next) => next, + }; + + let mut upper = Vec::with_capacity((next.len() + 1) / 2); + loop { + visitor.move_up(); + + match merkelize_row::(next.drain(..), upper, visitor) { + Ok(root) => return root, + Err(t) => { + // swap collections to avoid allocations + upper = next; + next = t; + }, + }; + } +} + +/// A generated merkle proof. +/// +/// The structure contains all necessary data to later on verify the proof and the leaf itself. +#[derive(Debug, PartialEq, Eq)] +pub struct MerkleProof { + /// Root hash of generated merkle tree. + pub root: Hash, + /// Proof items (does not contain the leaf hash, nor the root obviously). + /// + /// This vec contains all inner node hashes necessary to reconstruct the root hash given the + /// leaf hash. + pub proof: Vec, + /// Number of leaves in the original tree. 
+ /// + /// This is needed to detect a case where we have an odd number of leaves that "get promoted" + /// to upper layers. + pub number_of_leaves: usize, + /// Index of the leaf the proof is for (0-based). + pub leaf_index: usize, + /// Leaf content. + pub leaf: T, +} + +/// A trait of object inspecting merkle root creation. +/// +/// It can be passed to [`merkelize_row`] or [`merkelize`] functions and will be notified +/// about tree traversal. +trait Visitor { + /// We are moving one level up in the tree. + fn move_up(&mut self); + + /// We are creating an inner node from given `left` and `right` nodes. + /// + /// Note that in case of last odd node in the row `right` might be empty. + /// The method will also visit the `root` hash (level 0). + /// + /// The `index` is an index of `left` item. + fn visit(&mut self, index: usize, left: &Option, right: &Option); +} + +/// No-op implementation of the visitor. +impl Visitor for () { + fn move_up(&mut self) {} + fn visit(&mut self, _index: usize, _left: &Option, _right: &Option) {} +} + +/// Construct a Merkle Proof for leaves given by indices. +/// +/// The function constructs a (partial) Merkle Tree first and stores all elements required +/// to prove requested item (leaf) given the root hash. +/// +/// Both the Proof and the Root Hash is returned. +/// +/// # Panic +/// +/// The function will panic if given [`leaf_index`] is greater than the number of leaves. +pub fn merkle_proof(leaves: I, leaf_index: usize) -> MerkleProof +where + H: Hasher, + I: IntoIterator, + I::IntoIter: ExactSizeIterator, + T: AsRef<[u8]>, +{ + let mut leaf = None; + let iter = leaves.into_iter().enumerate().map(|(idx, l)| { + let hash = H::hash(l.as_ref()); + if idx == leaf_index { + leaf = Some(l); + } + hash + }); + + /// The struct collects a proof for single leaf. 
+ struct ProofCollection { + proof: Vec, + position: usize, + } + + impl ProofCollection { + fn new(position: usize) -> Self { + ProofCollection { proof: Default::default(), position } + } + } + + impl Visitor for ProofCollection { + fn move_up(&mut self) { + self.position /= 2; + } + + fn visit(&mut self, index: usize, left: &Option, right: &Option) { + // we are at left branch - right goes to the proof. + if self.position == index { + if let Some(right) = right { + self.proof.push(*right); + } + } + // we are at right branch - left goes to the proof. + if self.position == index + 1 { + if let Some(left) = left { + self.proof.push(*left); + } + } + } + } + + let number_of_leaves = iter.len(); + let mut collect_proof = ProofCollection::new(leaf_index); + + let root = merkelize::(iter, &mut collect_proof); + let leaf = leaf.expect("Requested `leaf_index` is greater than number of leaves."); + + #[cfg(feature = "debug")] + log::debug!( + "[merkle_proof] Proof: {:?}", + collect_proof.proof.iter().map(hex::encode).collect::>() + ); + + MerkleProof { root, proof: collect_proof.proof, number_of_leaves, leaf_index, leaf } +} + +/// Leaf node for proof verification. +/// +/// Can be either a value that needs to be hashed first, +/// or the hash itself. +#[derive(Debug, PartialEq, Eq)] +pub enum Leaf<'a> { + /// Leaf content. + Value(&'a [u8]), + /// Hash of the leaf content. + Hash(Hash), +} + +impl<'a, T: AsRef<[u8]>> From<&'a T> for Leaf<'a> { + fn from(v: &'a T) -> Self { + Leaf::Value(v.as_ref()) + } +} + +impl<'a> From for Leaf<'a> { + fn from(v: Hash) -> Self { + Leaf::Hash(v) + } +} + +/// Verify Merkle Proof correctness versus given root hash. +/// +/// The proof is NOT expected to contain leaf hash as the first +/// element, but only all adjacent nodes required to eventually by process of +/// concatenating and hashing end up with given root hash. +/// +/// The proof must not contain the root hash. 
+pub fn verify_proof<'a, H, P, L>( + root: &'a Hash, + proof: P, + number_of_leaves: usize, + leaf_index: usize, + leaf: L, +) -> bool +where + H: Hasher, + P: IntoIterator, + L: Into>, +{ + if leaf_index >= number_of_leaves { + return false + } + + let leaf_hash = match leaf.into() { + Leaf::Value(content) => H::hash(content), + Leaf::Hash(hash) => hash, + }; + + let mut combined = [0_u8; 64]; + let mut position = leaf_index; + let mut width = number_of_leaves; + let computed = proof.into_iter().fold(leaf_hash, |a, b| { + if position % 2 == 1 || position + 1 == width { + combined[0..32].copy_from_slice(&b); + combined[32..64].copy_from_slice(&a); + } else { + combined[0..32].copy_from_slice(&a); + combined[32..64].copy_from_slice(&b); + } + let hash = H::hash(&combined); + #[cfg(feature = "debug")] + log::debug!( + "[verify_proof]: (a, b) {:?}, {:?} => {:?} ({:?}) hash", + hex::encode(a), + hex::encode(b), + hex::encode(hash), + hex::encode(combined) + ); + position /= 2; + width = ((width - 1) / 2) + 1; + hash + }); + + root == &computed +} + +/// Processes a single row (layer) of a tree by taking pairs of elements, +/// concatenating them, hashing and placing into resulting vector. +/// +/// In case only one element is provided it is returned via `Ok` result, in any other case (also an +/// empty iterator) an `Err` with the inner nodes of upper layer is returned. 
+fn merkelize_row( + mut iter: I, + mut next: Vec, + visitor: &mut V, +) -> Result> +where + H: Hasher, + V: Visitor, + I: Iterator, +{ + #[cfg(feature = "debug")] + log::debug!("[merkelize_row]"); + next.clear(); + + let mut index = 0; + let mut combined = [0_u8; 64]; + loop { + let a = iter.next(); + let b = iter.next(); + visitor.visit(index, &a, &b); + + #[cfg(feature = "debug")] + log::debug!(" {:?}\n {:?}", a.as_ref().map(hex::encode), b.as_ref().map(hex::encode)); + + index += 2; + match (a, b) { + (Some(a), Some(b)) => { + combined[0..32].copy_from_slice(&a); + combined[32..64].copy_from_slice(&b); + + next.push(H::hash(&combined)); + }, + // Odd number of items. Promote the item to the upper layer. + (Some(a), None) if !next.is_empty() => { + next.push(a); + }, + // Last item = root. + (Some(a), None) => return Ok(a), + // Finish up, no more items. + _ => { + #[cfg(feature = "debug")] + log::debug!( + "[merkelize_row] Next: {:?}", + next.iter().map(hex::encode).collect::>() + ); + return Err(next) + }, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use hex_literal::hex; + + #[test] + fn should_generate_empty_root() { + // given + let _ = env_logger::try_init(); + let data: Vec<[u8; 1]> = Default::default(); + + // when + let out = merkle_root::(data); + + // then + assert_eq!( + hex::encode(&out), + "0000000000000000000000000000000000000000000000000000000000000000" + ); + } + + #[test] + fn should_generate_single_root() { + // given + let _ = env_logger::try_init(); + let data = vec![hex!("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b")]; + + // when + let out = merkle_root::(data); + + // then + assert_eq!( + hex::encode(&out), + "aeb47a269393297f4b0a3c9c9cfd00c7a4195255274cf39d83dabc2fcc9ff3d7" + ); + } + + #[test] + fn should_generate_root_pow_2() { + // given + let _ = env_logger::try_init(); + let data = vec![ + hex!("E04CC55ebEE1cBCE552f250e85c57B70B2E2625b"), + hex!("25451A4de12dcCc2D166922fA938E900fCc4ED24"), + ]; + + // when + let out = 
merkle_root::(data); + + // then + assert_eq!( + hex::encode(&out), + "697ea2a8fe5b03468548a7a413424a6292ab44a82a6f5cc594c3fa7dda7ce402" + ); + } + + #[test] + fn should_generate_root_complex() { + let _ = env_logger::try_init(); + let test = |root, data| { + assert_eq!(hex::encode(&merkle_root::(data)), root); + }; + + test( + "aff1208e69c9e8be9b584b07ebac4e48a1ee9d15ce3afe20b77a4d29e4175aa3", + vec!["a", "b", "c"], + ); + + test( + "b8912f7269068901f231a965adfefbc10f0eedcfa61852b103efd54dac7db3d7", + vec!["a", "b", "a"], + ); + + test( + "dc8e73fe6903148ff5079baecc043983625c23b39f31537e322cd0deee09fa9c", + vec!["a", "b", "a", "b"], + ); + + test( + "fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239", + vec!["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"], + ); + } + + #[test] + fn should_generate_and_verify_proof_simple() { + // given + let _ = env_logger::try_init(); + let data = vec!["a", "b", "c"]; + + // when + let proof0 = merkle_proof::(data.clone(), 0); + assert!(verify_proof::( + &proof0.root, + proof0.proof.clone(), + data.len(), + proof0.leaf_index, + &proof0.leaf, + )); + + let proof1 = merkle_proof::(data.clone(), 1); + assert!(verify_proof::( + &proof1.root, + proof1.proof, + data.len(), + proof1.leaf_index, + &proof1.leaf, + )); + + let proof2 = merkle_proof::(data.clone(), 2); + assert!(verify_proof::( + &proof2.root, + proof2.proof, + data.len(), + proof2.leaf_index, + &proof2.leaf + )); + + // then + assert_eq!(hex::encode(proof0.root), hex::encode(proof1.root)); + assert_eq!(hex::encode(proof2.root), hex::encode(proof1.root)); + + assert!(!verify_proof::( + &hex!("fb3b3be94be9e983ba5e094c9c51a7d96a4fa2e5d8e891df00ca89ba05bb1239"), + proof0.proof, + data.len(), + proof0.leaf_index, + &proof0.leaf + )); + + assert!(!verify_proof::( + &proof0.root, + vec![], + data.len(), + proof0.leaf_index, + &proof0.leaf + )); + } + + #[test] + fn should_generate_and_verify_proof_complex() { + // given + let _ = env_logger::try_init(); + let 
data = vec!["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]; + + for l in 0..data.len() { + // when + let proof = merkle_proof::(data.clone(), l); + // then + assert!(verify_proof::( + &proof.root, + proof.proof, + data.len(), + proof.leaf_index, + &proof.leaf + )); + } + } + + #[test] + fn should_generate_and_verify_proof_large() { + // given + let _ = env_logger::try_init(); + let mut data = vec![]; + for i in 1..16 { + for c in 'a'..'z' { + if c as usize % i != 0 { + data.push(c.to_string()); + } + } + + for l in 0..data.len() { + // when + let proof = merkle_proof::(data.clone(), l); + // then + assert!(verify_proof::( + &proof.root, + proof.proof, + data.len(), + proof.leaf_index, + &proof.leaf + )); + } + } + } + + #[test] + fn should_generate_and_verify_proof_large_tree() { + // given + let _ = env_logger::try_init(); + let mut data = vec![]; + for i in 0..6000 { + data.push(format!("{}", i)); + } + + for l in (0..data.len()).step_by(13) { + // when + let proof = merkle_proof::(data.clone(), l); + // then + assert!(verify_proof::( + &proof.root, + proof.proof, + data.len(), + proof.leaf_index, + &proof.leaf + )); + } + } + + #[test] + #[should_panic] + fn should_panic_on_invalid_leaf_index() { + let _ = env_logger::try_init(); + merkle_proof::(vec!["a"], 5); + } + + #[test] + fn should_generate_and_verify_proof_on_test_data() { + let addresses = vec![ + "0x9aF1Ca5941148eB6A3e9b9C741b69738292C533f", + "0xDD6ca953fddA25c496165D9040F7F77f75B75002", + "0x60e9C47B64Bc1C7C906E891255EaEC19123E7F42", + "0xfa4859480Aa6D899858DE54334d2911E01C070df", + "0x19B9b128470584F7209eEf65B69F3624549Abe6d", + "0xC436aC1f261802C4494504A11fc2926C726cB83b", + "0xc304C8C2c12522F78aD1E28dD86b9947D7744bd0", + "0xDa0C2Cba6e832E55dE89cF4033affc90CC147352", + "0xf850Fd22c96e3501Aad4CDCBf38E4AEC95622411", + "0x684918D4387CEb5E7eda969042f036E226E50642", + "0x963F0A1bFbb6813C0AC88FcDe6ceB96EA634A595", + "0x39B38ad74b8bCc5CE564f7a27Ac19037A95B6099", + 
"0xC2Dec7Fdd1fef3ee95aD88EC8F3Cd5bd4065f3C7", + "0x9E311f05c2b6A43C2CCF16fB2209491BaBc2ec01", + "0x927607C30eCE4Ef274e250d0bf414d4a210b16f0", + "0x98882bcf85E1E2DFF780D0eB360678C1cf443266", + "0xFBb50191cd0662049E7C4EE32830a4Cc9B353047", + "0x963854fc2C358c48C3F9F0A598B9572c581B8DEF", + "0xF9D7Bc222cF6e3e07bF66711e6f409E51aB75292", + "0xF2E3fd32D063F8bBAcB9e6Ea8101C2edd899AFe6", + "0x407a5b9047B76E8668570120A96d580589fd1325", + "0xEAD9726FAFB900A07dAd24a43AE941d2eFDD6E97", + "0x42f5C8D9384034A9030313B51125C32a526b6ee8", + "0x158fD2529Bc4116570Eb7C80CC76FEf33ad5eD95", + "0x0A436EE2E4dEF3383Cf4546d4278326Ccc82514E", + "0x34229A215db8FeaC93Caf8B5B255e3c6eA51d855", + "0xEb3B7CF8B1840242CB98A732BA464a17D00b5dDF", + "0x2079692bf9ab2d6dc7D79BBDdEE71611E9aA3B72", + "0x46e2A67e5d450e2Cf7317779f8274a2a630f3C9B", + "0xA7Ece4A5390DAB18D08201aE18800375caD78aab", + "0x15E1c0D24D62057Bf082Cb2253dA11Ef0d469570", + "0xADDEF4C9b5687Eb1F7E55F2251916200A3598878", + "0xe0B16Fb96F936035db2b5A68EB37D470fED2f013", + "0x0c9A84993feaa779ae21E39F9793d09e6b69B62D", + "0x3bc4D5148906F70F0A7D1e2756572655fd8b7B34", + "0xFf4675C26903D5319795cbd3a44b109E7DDD9fDe", + "0xCec4450569A8945C6D2Aba0045e4339030128a92", + "0x85f0584B10950E421A32F471635b424063FD8405", + "0xb38bEe7Bdc0bC43c096e206EFdFEad63869929E3", + "0xc9609466274Fef19D0e58E1Ee3b321D5C141067E", + "0xa08EA868cF75268E7401021E9f945BAe73872ecc", + "0x67C9Cb1A29E964Fe87Ff669735cf7eb87f6868fE", + "0x1B6BEF636aFcdd6085cD4455BbcC93796A12F6E2", + "0x46B37b243E09540b55cF91C333188e7D5FD786dD", + "0x8E719E272f62Fa97da93CF9C941F5e53AA09e44a", + "0xa511B7E7DB9cb24AD5c89fBb6032C7a9c2EfA0a5", + "0x4D11FDcAeD335d839132AD450B02af974A3A66f8", + "0xB8cf790a5090E709B4619E1F335317114294E17E", + "0x7f0f57eA064A83210Cafd3a536866ffD2C5eDCB3", + "0xC03C848A4521356EF800e399D889e9c2A25D1f9E", + "0xC6b03DF05cb686D933DD31fCa5A993bF823dc4FE", + "0x58611696b6a8102cf95A32c25612E4cEF32b910F", + "0x2ed4bC7197AEF13560F6771D930Bf907772DE3CE", + 
"0x3C5E58f334306be029B0e47e119b8977B2639eb4", + "0x288646a1a4FeeC560B349d210263c609aDF649a6", + "0xb4F4981E0d027Dc2B3c86afA0D0fC03d317e83C0", + "0xaAE4A87F8058feDA3971f9DEd639Ec9189aA2500", + "0x355069DA35E598913d8736E5B8340527099960b8", + "0x3cf5A0F274cd243C0A186d9fCBdADad089821B93", + "0xca55155dCc4591538A8A0ca322a56EB0E4aD03C4", + "0xE824D0268366ec5C4F23652b8eD70D552B1F2b8B", + "0x84C3e9B25AE8a9b39FF5E331F9A597F2DCf27Ca9", + "0xcA0018e278751De10d26539915d9c7E7503432FE", + "0xf13077dE6191D6c1509ac7E088b8BE7Fe656c28b", + "0x7a6bcA1ec9Db506e47ac6FD86D001c2aBc59C531", + "0xeA7f9A2A9dd6Ba9bc93ca615C3Ddf26973146911", + "0x8D0d8577e16F8731d4F8712BAbFa97aF4c453458", + "0xB7a7855629dF104246997e9ACa0E6510df75d0ea", + "0x5C1009BDC70b0C8Ab2e5a53931672ab448C17c89", + "0x40B47D1AfefEF5eF41e0789F0285DE7b1C31631C", + "0x5086933d549cEcEB20652CE00973703CF10Da373", + "0xeb364f6FE356882F92ae9314fa96116Cf65F47d8", + "0xdC4D31516A416cEf533C01a92D9a04bbdb85EE67", + "0x9b36E086E5A274332AFd3D8509e12ca5F6af918d", + "0xBC26394fF36e1673aE0608ce91A53B9768aD0D76", + "0x81B5AB400be9e563fA476c100BE898C09966426c", + "0x9d93C8ae5793054D28278A5DE6d4653EC79e90FE", + "0x3B8E75804F71e121008991E3177fc942b6c28F50", + "0xC6Eb5886eB43dD473f5BB4e21e56E08dA464D9B4", + "0xfdf1277b71A73c813cD0e1a94B800f4B1Db66DBE", + "0xc2ff2cCc98971556670e287Ff0CC39DA795231ad", + "0x76b7E1473f0D0A87E9B4a14E2B179266802740f5", + "0xA7Bc965660a6EF4687CCa4F69A97563163A3C2Ef", + "0xB9C2b47888B9F8f7D03dC1de83F3F55E738CebD3", + "0xEd400162E6Dd6bD2271728FFb04176bF770De94a", + "0xE3E8331156700339142189B6E555DCb2c0962750", + "0xbf62e342Bc7706a448EdD52AE871d9C4497A53b1", + "0xb9d7A1A111eed75714a0AcD2dd467E872eE6B03D", + "0x03942919DFD0383b8c574AB8A701d89fd4bfA69D", + "0x0Ef4C92355D3c8c7050DFeb319790EFCcBE6fe9e", + "0xA6895a3cf0C60212a73B3891948ACEcF1753f25E", + "0x0Ed509239DB59ef3503ded3d31013C983d52803A", + "0xc4CE8abD123BfAFc4deFf37c7D11DeCd5c350EE4", + "0x4A4Bf59f7038eDcd8597004f35d7Ee24a7Bdd2d3", + 
"0x5769E8e8A2656b5ed6b6e6fa2a2bFAeaf970BB87", + "0xf9E15cCE181332F4F57386687c1776b66C377060", + "0xc98f8d4843D56a46C21171900d3eE538Cc74dbb5", + "0x3605965B47544Ce4302b988788B8195601AE4dEd", + "0xe993BDfdcAac2e65018efeE0F69A12678031c71d", + "0x274fDf8801385D3FAc954BCc1446Af45f5a8304c", + "0xBFb3f476fcD6429F4a475bA23cEFdDdd85c6b964", + "0x806cD16588Fe812ae740e931f95A289aFb4a4B50", + "0xa89488CE3bD9C25C3aF797D1bbE6CA689De79d81", + "0xd412f1AfAcf0Ebf3Cd324593A231Fc74CC488B12", + "0xd1f715b2D7951d54bc31210BbD41852D9BF98Ed1", + "0xf65aD707c344171F467b2ADba3d14f312219cE23", + "0x2971a4b242e9566dEF7bcdB7347f5E484E11919B", + "0x12b113D6827E07E7D426649fBd605f427da52314", + "0x1c6CA45171CDb9856A6C9Dba9c5F1216913C1e97", + "0x11cC6ee1d74963Db23294FCE1E3e0A0555779CeA", + "0x8Aa1C721255CDC8F895E4E4c782D86726b068667", + "0xA2cDC1f37510814485129aC6310b22dF04e9Bbf0", + "0xCf531b71d388EB3f5889F1f78E0d77f6fb109767", + "0xBe703e3545B2510979A0cb0C440C0Fba55c6dCB5", + "0x30a35886F989db39c797D8C93880180Fdd71b0c8", + "0x1071370D981F60c47A9Cd27ac0A61873a372cBB2", + "0x3515d74A11e0Cb65F0F46cB70ecf91dD1712daaa", + "0x50500a3c2b7b1229c6884505D00ac6Be29Aecd0C", + "0x9A223c2a11D4FD3585103B21B161a2B771aDA3d1", + "0xd7218df03AD0907e6c08E707B15d9BD14285e657", + "0x76CfD72eF5f93D1a44aD1F80856797fBE060c70a", + "0x44d093cB745944991EFF5cBa151AA6602d6f5420", + "0x626516DfF43bf09A71eb6fd1510E124F96ED0Cde", + "0x6530824632dfe099304E2DC5701cA99E6d031E08", + "0x57e6c423d6a7607160d6379A0c335025A14DaFC0", + "0x3966D4AD461Ef150E0B10163C81E79b9029E69c3", + "0xF608aCfd0C286E23721a3c347b2b65039f6690F1", + "0xbfB8FAac31A25646681936977837f7740fCd0072", + "0xd80aa634a623a7ED1F069a1a3A28a173061705c7", + "0x9122a77B36363e24e12E1E2D73F87b32926D3dF5", + "0x62562f0d1cD31315bCCf176049B6279B2bfc39C2", + "0x48aBF7A2a7119e5675059E27a7082ba7F38498b2", + "0xb4596983AB9A9166b29517acD634415807569e5F", + "0x52519D16E20BC8f5E96Da6d736963e85b2adA118", + "0x7663893C3dC0850EfC5391f5E5887eD723e51B83", + 
"0x5FF323a29bCC3B5b4B107e177EccEF4272959e61", + "0xee6e499AdDf4364D75c05D50d9344e9daA5A9AdF", + "0x1631b0BD31fF904aD67dD58994C6C2051CDe4E75", + "0xbc208e9723D44B9811C428f6A55722a26204eEF2", + "0xe76103a222Ee2C7Cf05B580858CEe625C4dc00E1", + "0xC71Bb2DBC51760f4fc2D46D84464410760971B8a", + "0xB4C18811e6BFe564D69E12c224FFc57351f7a7ff", + "0xD11DB0F5b41061A887cB7eE9c8711438844C298A", + "0xB931269934A3D4432c084bAAc3d0de8143199F4f", + "0x070037cc85C761946ec43ea2b8A2d5729908A2a1", + "0x2E34aa8C95Ffdbb37f14dCfBcA69291c55Ba48DE", + "0x052D93e8d9220787c31d6D83f87eC7dB088E998f", + "0x498dAC6C69b8b9ad645217050054840f1D91D029", + "0xE4F7D60f9d84301e1fFFd01385a585F3A11F8E89", + "0xEa637992f30eA06460732EDCBaCDa89355c2a107", + "0x4960d8Da07c27CB6Be48a79B96dD70657c57a6bF", + "0x7e471A003C8C9fdc8789Ded9C3dbe371d8aa0329", + "0xd24265Cc10eecb9e8d355CCc0dE4b11C556E74D7", + "0xDE59C8f7557Af779674f41CA2cA855d571018690", + "0x2fA8A6b3b6226d8efC9d8f6EBDc73Ca33DDcA4d8", + "0xe44102664c6c2024673Ff07DFe66E187Db77c65f", + "0x94E3f4f90a5f7CBF2cc2623e66B8583248F01022", + "0x0383EdBbc21D73DEd039E9C1Ff6bf56017b4CC40", + "0x64C3E49898B88d1E0f0d02DA23E0c00A2Cd0cA99", + "0xF4ccfB67b938d82B70bAb20975acFAe402E812E1", + "0x4f9ee5829e9852E32E7BC154D02c91D8E203e074", + "0xb006312eF9713463bB33D22De60444Ba95609f6B", + "0x7Cbe76ef69B52110DDb2e3b441C04dDb11D63248", + "0x70ADEEa65488F439392B869b1Df7241EF317e221", + "0x64C0bf8AA36Ba590477585Bc0D2BDa7970769463", + "0xA4cDc98593CE52d01Fe5Ca47CB3dA5320e0D7592", + "0xc26B34D375533fFc4c5276282Fa5D660F3d8cbcB", + ]; + let root = hex!("72b0acd7c302a84f1f6b6cefe0ba7194b7398afb440e1b44a9dbbe270394ca53"); + + let data = addresses + .into_iter() + .map(|address| hex::decode(&address[2..]).unwrap()) + .collect::>(); + + for l in 0..data.len() { + // when + let proof = merkle_proof::(data.clone(), l); + assert_eq!(hex::encode(&proof.root), hex::encode(&root)); + assert_eq!(proof.leaf_index, l); + assert_eq!(&proof.leaf, &data[l]); + + // then + assert!(verify_proof::( + 
&proof.root, + proof.proof, + data.len(), + proof.leaf_index, + &proof.leaf + )); + } + + let proof = merkle_proof::(data.clone(), data.len() - 1); + + assert_eq!( + proof, + MerkleProof { + root, + proof: vec![ + hex!("340bcb1d49b2d82802ddbcf5b85043edb3427b65d09d7f758fbc76932ad2da2f"), + hex!("ba0580e5bd530bc93d61276df7969fb5b4ae8f1864b4a28c280249575198ff1f"), + hex!("d02609d2bbdb28aa25f58b85afec937d5a4c85d37925bce6d0cf802f9d76ba79"), + hex!("ae3f8991955ed884613b0a5f40295902eea0e0abe5858fc520b72959bc016d4e"), + ], + number_of_leaves: data.len(), + leaf_index: data.len() - 1, + leaf: hex!("c26B34D375533fFc4c5276282Fa5D660F3d8cbcB").to_vec(), + } + ); + } +} diff --git a/frame/beefy-mmr/src/lib.rs b/frame/beefy-mmr/src/lib.rs new file mode 100644 index 0000000000000..001831639b169 --- /dev/null +++ b/frame/beefy-mmr/src/lib.rs @@ -0,0 +1,236 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +//! A BEEFY+MMR pallet combo. +//! +//! While both BEEFY and Merkle Mountain Range (MMR) can be used separately, +//! these tools were designed to work together in unison. +//! +//! The pallet provides a standardized MMR Leaf format that is can be used +//! to bridge BEEFY+MMR-based networks (both standalone and polkadot-like). +//! +//! The MMR leaf contains: +//! 1. 
Block number and parent block hash. +//! 2. Merkle Tree Root Hash of next BEEFY validator set. +//! 3. Merkle Tree Root Hash of current parachain heads state. +//! +//! and thanks to versioning can be easily updated in the future. + +use sp_runtime::traits::{Convert, Hash}; +use sp_std::prelude::*; + +use beefy_primitives::mmr::{BeefyNextAuthoritySet, MmrLeaf, MmrLeafVersion}; +use pallet_mmr::primitives::LeafDataProvider; + +use codec::Encode; +use frame_support::traits::Get; + +pub use pallet::*; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; + +/// A BEEFY consensus digest item with MMR root hash. +pub struct DepositBeefyDigest(sp_std::marker::PhantomData); + +impl pallet_mmr::primitives::OnNewRoot for DepositBeefyDigest +where + T: pallet_mmr::Config, + T: pallet_beefy::Config, +{ + fn on_new_root(root: &::Hash) { + let digest = sp_runtime::generic::DigestItem::Consensus( + beefy_primitives::BEEFY_ENGINE_ID, + codec::Encode::encode(&beefy_primitives::ConsensusLog::< + ::BeefyId, + >::MmrRoot(*root)), + ); + >::deposit_log(digest); + } +} + +/// Convert BEEFY secp256k1 public keys into Ethereum addresses +pub struct BeefyEcdsaToEthereum; +impl Convert> for BeefyEcdsaToEthereum { + fn convert(a: beefy_primitives::crypto::AuthorityId) -> Vec { + use sp_core::crypto::Public; + let compressed_key = a.as_slice(); + + libsecp256k1::PublicKey::parse_slice( + compressed_key, + Some(libsecp256k1::PublicKeyFormat::Compressed), + ) + // uncompress the key + .map(|pub_key| pub_key.serialize().to_vec()) + // now convert to ETH address + .map(|uncompressed| sp_io::hashing::keccak_256(&uncompressed[1..])[12..].to_vec()) + .map_err(|_| { + log::error!(target: "runtime::beefy", "Invalid BEEFY PublicKey format!"); + }) + .unwrap_or_default() + } +} + +type MerkleRootOf = ::Hash; +type ParaId = u32; +type ParaHead = Vec; + +/// A type that is able to return current list of parachain heads that end up in the MMR leaf. 
+pub trait ParachainHeadsProvider { + /// Return a list of tuples containing a `ParaId` and Parachain Header data (ParaHead). + /// + /// The returned data does not have to be sorted. + fn parachain_heads() -> Vec<(ParaId, ParaHead)>; +} + +/// A default implementation for runtimes without parachains. +impl ParachainHeadsProvider for () { + fn parachain_heads() -> Vec<(ParaId, ParaHead)> { + Default::default() + } +} + +#[frame_support::pallet] +pub mod pallet { + #![allow(missing_docs)] + + use super::*; + use frame_support::pallet_prelude::*; + + /// BEEFY-MMR pallet. + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + /// The module's configuration trait. + #[pallet::config] + #[pallet::disable_frame_system_supertrait_check] + pub trait Config: pallet_mmr::Config + pallet_beefy::Config { + /// Current leaf version. + /// + /// Specifies the version number added to every leaf that get's appended to the MMR. + /// Read more in [`MmrLeafVersion`] docs about versioning leaves. + type LeafVersion: Get; + + /// Convert BEEFY AuthorityId to a form that would end up in the Merkle Tree. + /// + /// For instance for ECDSA (secp256k1) we want to store uncompressed public keys (65 bytes) + /// and later to Ethereum Addresses (160 bits) to simplify using them on Ethereum chain, + /// but the rest of the Substrate codebase is storing them compressed (33 bytes) for + /// efficiency reasons. + type BeefyAuthorityToMerkleLeaf: Convert<::BeefyId, Vec>; + + /// Retrieve a list of current parachain heads. + /// + /// The trait is implemented for `paras` module, but since not all chains might have + /// parachains, and we want to keep the MMR leaf structure uniform, it's possible to use + /// `()` as well to simply put dummy data to the leaf. + type ParachainHeads: ParachainHeadsProvider; + } + + /// Details of next BEEFY authority set. + /// + /// This storage entry is used as cache for calls to [`update_beefy_next_authority_set`]. 
+ #[pallet::storage] + #[pallet::getter(fn beefy_next_authorities)] + pub type BeefyNextAuthorities = + StorageValue<_, BeefyNextAuthoritySet>, ValueQuery>; +} + +impl LeafDataProvider for Pallet +where + MerkleRootOf: From + Into, +{ + type LeafData = MmrLeaf< + ::BlockNumber, + ::Hash, + MerkleRootOf, + >; + + fn leaf_data() -> Self::LeafData { + MmrLeaf { + version: T::LeafVersion::get(), + parent_number_and_hash: frame_system::Pallet::::leaf_data(), + parachain_heads: Pallet::::parachain_heads_merkle_root(), + beefy_next_authority_set: Pallet::::update_beefy_next_authority_set(), + } + } +} + +impl beefy_merkle_tree::Hasher for Pallet +where + MerkleRootOf: Into, +{ + fn hash(data: &[u8]) -> beefy_merkle_tree::Hash { + ::Hashing::hash(data).into() + } +} + +impl Pallet +where + MerkleRootOf: From + Into, +{ + /// Returns latest root hash of a merkle tree constructed from all active parachain headers. + /// + /// The leafs are sorted by `ParaId` to allow more efficient lookups and non-existence proofs. + /// + /// NOTE this does not include parathreads - only parachains are part of the merkle tree. + /// + /// NOTE This is an initial and inefficient implementation, which re-constructs + /// the merkle tree every block. Instead we should update the merkle root in + /// [Self::on_initialize] call of this pallet and update the merkle tree efficiently (use + /// on-chain storage to persist inner nodes). + fn parachain_heads_merkle_root() -> MerkleRootOf { + let mut para_heads = T::ParachainHeads::parachain_heads(); + para_heads.sort(); + let para_heads = para_heads.into_iter().map(|pair| pair.encode()); + beefy_merkle_tree::merkle_root::(para_heads).into() + } + + /// Returns details of the next BEEFY authority set. + /// + /// Details contain authority set id, authority set length and a merkle root, + /// constructed from uncompressed secp256k1 public keys converted to Ethereum addresses + /// of the next BEEFY authority set. 
+ /// + /// This function will use a storage-cached entry in case the set didn't change, or compute and + /// cache new one in case it did. + fn update_beefy_next_authority_set() -> BeefyNextAuthoritySet> { + let id = pallet_beefy::Pallet::::validator_set_id() + 1; + let current_next = Self::beefy_next_authorities(); + // avoid computing the merkle tree if validator set id didn't change. + if id == current_next.id { + return current_next + } + + let beefy_addresses = pallet_beefy::Pallet::::next_authorities() + .into_iter() + .map(T::BeefyAuthorityToMerkleLeaf::convert) + .collect::>(); + let len = beefy_addresses.len() as u32; + let root = beefy_merkle_tree::merkle_root::(beefy_addresses).into(); + let next_set = BeefyNextAuthoritySet { id, len, root }; + // cache the result + BeefyNextAuthorities::::put(&next_set); + next_set + } +} diff --git a/frame/beefy-mmr/src/mock.rs b/frame/beefy-mmr/src/mock.rs new file mode 100644 index 0000000000000..95b87c360510a --- /dev/null +++ b/frame/beefy-mmr/src/mock.rs @@ -0,0 +1,203 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::vec; + +use beefy_primitives::mmr::MmrLeafVersion; +use frame_support::{ + construct_runtime, parameter_types, sp_io::TestExternalities, traits::GenesisBuild, + BasicExternalities, +}; +use sp_core::{Hasher, H256}; +use sp_runtime::{ + app_crypto::ecdsa::Public, + impl_opaque_keys, + testing::Header, + traits::{BlakeTwo256, ConvertInto, IdentityLookup, Keccak256, OpaqueKeys}, +}; + +use crate as pallet_beefy_mmr; + +pub use beefy_primitives::{crypto::AuthorityId as BeefyId, ConsensusLog, BEEFY_ENGINE_ID}; + +impl_opaque_keys! { + pub struct MockSessionKeys { + pub dummy: pallet_beefy::Pallet, + } +} + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + Mmr: pallet_mmr::{Pallet, Storage}, + Beefy: pallet_beefy::{Pallet, Config, Storage}, + BeefyMmr: pallet_beefy_mmr::{Pallet, Storage}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = Call; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); +} + +parameter_types! 
{ + pub const Period: u64 = 1; + pub const Offset: u64 = 0; +} + +impl pallet_session::Config for Test { + type Event = Event; + type ValidatorId = u64; + type ValidatorIdOf = ConvertInto; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = MockSessionManager; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = MockSessionKeys; + type WeightInfo = (); +} + +pub type MmrLeaf = beefy_primitives::mmr::MmrLeaf< + ::BlockNumber, + ::Hash, + ::Hash, +>; + +impl pallet_mmr::Config for Test { + const INDEXING_PREFIX: &'static [u8] = b"mmr"; + + type Hashing = Keccak256; + + type Hash = ::Out; + + type LeafData = BeefyMmr; + + type OnNewRoot = pallet_beefy_mmr::DepositBeefyDigest; + + type WeightInfo = (); +} + +impl pallet_beefy::Config for Test { + type BeefyId = BeefyId; +} + +parameter_types! { + pub LeafVersion: MmrLeafVersion = MmrLeafVersion::new(1, 5); +} + +impl pallet_beefy_mmr::Config for Test { + type LeafVersion = LeafVersion; + + type BeefyAuthorityToMerkleLeaf = pallet_beefy_mmr::BeefyEcdsaToEthereum; + + type ParachainHeads = DummyParaHeads; +} + +pub struct DummyParaHeads; +impl pallet_beefy_mmr::ParachainHeadsProvider for DummyParaHeads { + fn parachain_heads() -> Vec<(pallet_beefy_mmr::ParaId, pallet_beefy_mmr::ParaHead)> { + vec![(15, vec![1, 2, 3]), (5, vec![4, 5, 6])] + } +} + +pub struct MockSessionManager; +impl pallet_session::SessionManager for MockSessionManager { + fn end_session(_: sp_staking::SessionIndex) {} + fn start_session(_: sp_staking::SessionIndex) {} + fn new_session(idx: sp_staking::SessionIndex) -> Option> { + if idx == 0 || idx == 1 { + Some(vec![1, 2]) + } else if idx == 2 { + Some(vec![3, 4]) + } else { + None + } + } +} + +// Note, that we can't use `UintAuthorityId` here. Reason is that the implementation +// of `to_public_key()` assumes, that a public key is 32 bytes long. 
This is true for +// ed25519 and sr25519 but *not* for ecdsa. An ecdsa public key is 33 bytes. +pub fn mock_beefy_id(id: u8) -> BeefyId { + let buf: [u8; 33] = [id; 33]; + let pk = Public::from_raw(buf); + BeefyId::from(pk) +} + +pub fn mock_authorities(vec: Vec) -> Vec<(u64, BeefyId)> { + vec.into_iter().map(|id| ((id as u64), mock_beefy_id(id))).collect() +} + +pub fn new_test_ext(ids: Vec) -> TestExternalities { + new_test_ext_raw_authorities(mock_authorities(ids)) +} + +pub fn new_test_ext_raw_authorities(authorities: Vec<(u64, BeefyId)>) -> TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let session_keys: Vec<_> = authorities + .iter() + .enumerate() + .map(|(_, id)| (id.0 as u64, id.0 as u64, MockSessionKeys { dummy: id.1.clone() })) + .collect(); + + BasicExternalities::execute_with_storage(&mut t, || { + for (ref id, ..) in &session_keys { + frame_system::Pallet::::inc_providers(id); + } + }); + + pallet_session::GenesisConfig:: { keys: session_keys } + .assimilate_storage(&mut t) + .unwrap(); + + t.into() +} diff --git a/frame/beefy-mmr/src/tests.rs b/frame/beefy-mmr/src/tests.rs new file mode 100644 index 0000000000000..7c70766623b4d --- /dev/null +++ b/frame/beefy-mmr/src/tests.rs @@ -0,0 +1,148 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::vec; + +use beefy_primitives::{ + mmr::{BeefyNextAuthoritySet, MmrLeafVersion}, + ValidatorSet, +}; +use codec::{Decode, Encode}; +use hex_literal::hex; + +use sp_core::H256; +use sp_io::TestExternalities; +use sp_runtime::{traits::Keccak256, DigestItem}; + +use frame_support::traits::OnInitialize; + +use crate::mock::*; + +fn init_block(block: u64) { + System::set_block_number(block); + Session::on_initialize(block); + Mmr::on_initialize(block); + Beefy::on_initialize(block); + BeefyMmr::on_initialize(block); +} + +pub fn beefy_log(log: ConsensusLog) -> DigestItem { + DigestItem::Consensus(BEEFY_ENGINE_ID, log.encode()) +} + +fn offchain_key(pos: usize) -> Vec { + (::INDEXING_PREFIX, pos as u64).encode() +} + +fn read_mmr_leaf(ext: &mut TestExternalities, index: usize) -> MmrLeaf { + type Node = pallet_mmr_primitives::DataOrHash; + ext.persist_offchain_overlay(); + let offchain_db = ext.offchain_db(); + offchain_db + .get(&offchain_key(index)) + .map(|d| Node::decode(&mut &*d).unwrap()) + .map(|n| match n { + Node::Data(d) => d, + _ => panic!("Unexpected MMR node."), + }) + .unwrap() +} + +#[test] +fn should_contain_mmr_digest() { + let mut ext = new_test_ext(vec![1, 2, 3, 4]); + ext.execute_with(|| { + init_block(1); + + assert_eq!( + System::digest().logs, + vec![beefy_log(ConsensusLog::MmrRoot( + hex!("f3e3afbfa69e89cd1e99f8d3570155962f3346d1d8758dc079be49ef70387758").into() + ))] + ); + + // unique every time + init_block(2); + + assert_eq!( + System::digest().logs, + vec![ + beefy_log(ConsensusLog::MmrRoot( + hex!("f3e3afbfa69e89cd1e99f8d3570155962f3346d1d8758dc079be49ef70387758").into() + )), + beefy_log(ConsensusLog::AuthoritiesChange(ValidatorSet { + validators: vec![mock_beefy_id(3), mock_beefy_id(4),], + id: 1, + })), + beefy_log(ConsensusLog::MmrRoot( + hex!("7d4ae4524bae75d52b63f08eab173b0c263eb95ae2c55c3a1d871241bd0cc559").into() + )), + ] + ); + }); +} + +#[test] +fn should_contain_valid_leaf_data() { + let mut ext = new_test_ext(vec![1, 
2, 3, 4]); + ext.execute_with(|| { + init_block(1); + }); + + let mmr_leaf = read_mmr_leaf(&mut ext, 0); + assert_eq!( + mmr_leaf, + MmrLeaf { + version: MmrLeafVersion::new(1, 5), + parent_number_and_hash: (0_u64, H256::repeat_byte(0x45)), + beefy_next_authority_set: BeefyNextAuthoritySet { + id: 1, + len: 2, + root: hex!("01b1a742589773fc054c8f5021a456316ffcec0370b25678b0696e116d1ef9ae") + .into(), + }, + parachain_heads: hex!( + "ed893c8f8cc87195a5d4d2805b011506322036bcace79642aa3e94ab431e442e" + ) + .into(), + } + ); + + // build second block on top + ext.execute_with(|| { + init_block(2); + }); + + let mmr_leaf = read_mmr_leaf(&mut ext, 1); + assert_eq!( + mmr_leaf, + MmrLeaf { + version: MmrLeafVersion::new(1, 5), + parent_number_and_hash: (1_u64, H256::repeat_byte(0x45)), + beefy_next_authority_set: BeefyNextAuthoritySet { + id: 2, + len: 2, + root: hex!("9c6b2c1b0d0b25a008e6c882cc7b415f309965c72ad2b944ac0931048ca31cd5") + .into(), + }, + parachain_heads: hex!( + "ed893c8f8cc87195a5d4d2805b011506322036bcace79642aa3e94ab431e442e" + ) + .into(), + } + ); +} diff --git a/frame/beefy/Cargo.toml b/frame/beefy/Cargo.toml new file mode 100644 index 0000000000000..e5af666e7ca54 --- /dev/null +++ b/frame/beefy/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "pallet-beefy" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" + +[dependencies] +codec = { version = "2.2.0", package = "parity-scale-codec", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.130", optional = true } + +frame-support = { version = "4.0.0-dev", path = "../support", default-features = false } +frame-system = { version = "4.0.0-dev", path = "../system", default-features = false } + +sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime", default-features = false } +sp-std = { version = "4.0.0-dev", path = 
"../../primitives/std", default-features = false } + +pallet-session = { version = "4.0.0-dev", path = "../session", default-features = false } + +beefy-primitives = { version = "4.0.0-dev", path = "../../primitives/beefy", default-features = false } + +[dev-dependencies] +sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } +sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } +sp-staking = { version = "4.0.0-dev", path = "../../primitives/staking" } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "serde", + "beefy-primitives/std", + "frame-support/std", + "frame-system/std", + "sp-runtime/std", + "sp-std/std", + "pallet-session/std", +] diff --git a/frame/beefy/src/lib.rs b/frame/beefy/src/lib.rs new file mode 100644 index 0000000000000..3b28d454849cf --- /dev/null +++ b/frame/beefy/src/lib.rs @@ -0,0 +1,179 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::Encode; + +use frame_support::{traits::OneSessionHandler, Parameter}; + +use sp_runtime::{ + generic::DigestItem, + traits::{IsMember, Member}, + RuntimeAppPublic, +}; +use sp_std::prelude::*; + +use beefy_primitives::{AuthorityIndex, ConsensusLog, ValidatorSet, BEEFY_ENGINE_ID}; + +#[cfg(test)] +mod mock; + +#[cfg(test)] +mod tests; + +pub use pallet::*; + +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Authority identifier type + type BeefyId: Member + Parameter + RuntimeAppPublic + Default + MaybeSerializeDeserialize; + } + + #[pallet::pallet] + pub struct Pallet(PhantomData); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + #[pallet::call] + impl Pallet {} + + /// The current authorities set + #[pallet::storage] + #[pallet::getter(fn authorities)] + pub(super) type Authorities = StorageValue<_, Vec, ValueQuery>; + + /// The current validator set id + #[pallet::storage] + #[pallet::getter(fn validator_set_id)] + pub(super) type ValidatorSetId = + StorageValue<_, beefy_primitives::ValidatorSetId, ValueQuery>; + + /// Authorities set scheduled to be used with the next session + #[pallet::storage] + #[pallet::getter(fn next_authorities)] + pub(super) type NextAuthorities = StorageValue<_, Vec, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + pub authorities: Vec, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { authorities: Vec::new() } + } + } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + Pallet::::initialize_authorities(&self.authorities); + } + } +} + +impl Pallet { + /// Return the current active BEEFY validator set. 
+ pub fn validator_set() -> ValidatorSet { + ValidatorSet:: { validators: Self::authorities(), id: Self::validator_set_id() } + } + + fn change_authorities(new: Vec, queued: Vec) { + // As in GRANDPA, we trigger a validator set change only if the the validator + // set has actually changed. + if new != Self::authorities() { + >::put(&new); + + let next_id = Self::validator_set_id() + 1u64; + >::put(next_id); + + let log: DigestItem = DigestItem::Consensus( + BEEFY_ENGINE_ID, + ConsensusLog::AuthoritiesChange(ValidatorSet { validators: new, id: next_id }) + .encode(), + ); + >::deposit_log(log); + } + + >::put(&queued); + } + + fn initialize_authorities(authorities: &[T::BeefyId]) { + if authorities.is_empty() { + return + } + + assert!(>::get().is_empty(), "Authorities are already initialized!"); + + >::put(authorities); + >::put(0); + // Like `pallet_session`, initialize the next validator set as well. + >::put(authorities); + } +} + +impl sp_runtime::BoundToRuntimeAppPublic for Pallet { + type Public = T::BeefyId; +} + +impl OneSessionHandler for Pallet { + type Key = T::BeefyId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_, k)| k).collect::>(); + Self::initialize_authorities(&authorities); + } + + fn on_new_session<'a, I: 'a>(changed: bool, validators: I, queued_validators: I) + where + I: Iterator, + { + if changed { + let next_authorities = validators.map(|(_, k)| k).collect::>(); + let next_queued_authorities = queued_validators.map(|(_, k)| k).collect::>(); + + Self::change_authorities(next_authorities, next_queued_authorities); + } + } + + fn on_disabled(i: u32) { + let log: DigestItem = DigestItem::Consensus( + BEEFY_ENGINE_ID, + ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(), + ); + + >::deposit_log(log); + } +} + +impl IsMember for Pallet { + fn is_member(authority_id: &T::BeefyId) -> bool { + Self::authorities().iter().any(|id| id == authority_id) + } +} diff --git 
a/frame/beefy/src/mock.rs b/frame/beefy/src/mock.rs new file mode 100644 index 0000000000000..a1fbeda4ab35c --- /dev/null +++ b/frame/beefy/src/mock.rs @@ -0,0 +1,164 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::vec; + +use frame_support::{ + construct_runtime, parameter_types, sp_io::TestExternalities, traits::GenesisBuild, + BasicExternalities, +}; +use sp_core::H256; +use sp_runtime::{ + app_crypto::ecdsa::Public, + impl_opaque_keys, + testing::Header, + traits::{BlakeTwo256, ConvertInto, IdentityLookup, OpaqueKeys}, + Perbill, +}; + +use crate as pallet_beefy; + +pub use beefy_primitives::{crypto::AuthorityId as BeefyId, ConsensusLog, BEEFY_ENGINE_ID}; + +impl_opaque_keys! { + pub struct MockSessionKeys { + pub dummy: pallet_beefy::Pallet, + } +} + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Pallet, Call, Config, Storage, Event}, + Beefy: pallet_beefy::{Pallet, Call, Config, Storage}, + Session: pallet_session::{Pallet, Call, Storage, Event, Config}, + } +); + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = Call; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; + type OnSetCode = (); +} + +impl pallet_beefy::Config for Test { + type BeefyId = BeefyId; +} + +parameter_types! { + pub const Period: u64 = 1; + pub const Offset: u64 = 0; + pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); +} + +impl pallet_session::Config for Test { + type Event = Event; + type ValidatorId = u64; + type ValidatorIdOf = ConvertInto; + type ShouldEndSession = pallet_session::PeriodicSessions; + type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = MockSessionManager; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = MockSessionKeys; + type WeightInfo = (); +} + +pub struct MockSessionManager; + +impl pallet_session::SessionManager for MockSessionManager { + fn end_session(_: sp_staking::SessionIndex) {} + fn start_session(_: sp_staking::SessionIndex) {} + fn new_session(idx: sp_staking::SessionIndex) -> Option> { + if idx == 0 || idx == 1 { + Some(vec![1, 2]) + } else if idx == 2 { + Some(vec![3, 4]) + } else { + None + } + } +} + +// Note, that we can't use `UintAuthorityId` here. Reason is that the implementation +// of `to_public_key()` assumes, that a public key is 32 bytes long. This is true for +// ed25519 and sr25519 but *not* for ecdsa. 
An ecdsa public key is 33 bytes. +pub fn mock_beefy_id(id: u8) -> BeefyId { + let buf: [u8; 33] = [id; 33]; + let pk = Public::from_raw(buf); + BeefyId::from(pk) +} + +pub fn mock_authorities(vec: Vec) -> Vec<(u64, BeefyId)> { + vec.into_iter().map(|id| ((id as u64), mock_beefy_id(id))).collect() +} + +pub fn new_test_ext(ids: Vec) -> TestExternalities { + new_test_ext_raw_authorities(mock_authorities(ids)) +} + +pub fn new_test_ext_raw_authorities(authorities: Vec<(u64, BeefyId)>) -> TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let session_keys: Vec<_> = authorities + .iter() + .enumerate() + .map(|(_, id)| (id.0 as u64, id.0 as u64, MockSessionKeys { dummy: id.1.clone() })) + .collect(); + + BasicExternalities::execute_with_storage(&mut t, || { + for (ref id, ..) in &session_keys { + frame_system::Pallet::::inc_providers(id); + } + }); + + pallet_session::GenesisConfig:: { keys: session_keys } + .assimilate_storage(&mut t) + .unwrap(); + + t.into() +} diff --git a/frame/beefy/src/tests.rs b/frame/beefy/src/tests.rs new file mode 100644 index 0000000000000..24f9acaf76bfc --- /dev/null +++ b/frame/beefy/src/tests.rs @@ -0,0 +1,142 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use std::vec; + +use beefy_primitives::ValidatorSet; +use codec::Encode; + +use sp_core::H256; +use sp_runtime::DigestItem; + +use frame_support::traits::OnInitialize; + +use crate::mock::*; + +fn init_block(block: u64) { + System::set_block_number(block); + Session::on_initialize(block); +} + +pub fn beefy_log(log: ConsensusLog) -> DigestItem { + DigestItem::Consensus(BEEFY_ENGINE_ID, log.encode()) +} + +#[test] +fn genesis_session_initializes_authorities() { + let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)]; + + new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { + let authorities = Beefy::authorities(); + + assert!(authorities.len() == 2); + assert_eq!(want[0], authorities[0]); + assert_eq!(want[1], authorities[1]); + + assert!(Beefy::validator_set_id() == 0); + + let next_authorities = Beefy::next_authorities(); + + assert!(next_authorities.len() == 2); + assert_eq!(want[0], next_authorities[0]); + assert_eq!(want[1], next_authorities[1]); + }); +} + +#[test] +fn session_change_updates_authorities() { + new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { + init_block(1); + + assert!(0 == Beefy::validator_set_id()); + + // no change - no log + assert!(System::digest().logs.is_empty()); + + init_block(2); + + assert!(1 == Beefy::validator_set_id()); + + let want = beefy_log(ConsensusLog::AuthoritiesChange(ValidatorSet { + validators: vec![mock_beefy_id(3), mock_beefy_id(4)], + id: 1, + })); + + let log = System::digest().logs[0].clone(); + + assert_eq!(want, log); + }); +} + +#[test] +fn session_change_updates_next_authorities() { + let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)]; + + new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { + init_block(1); + + let next_authorities = Beefy::next_authorities(); + + assert!(next_authorities.len() == 2); + assert_eq!(want[0], next_authorities[0]); + assert_eq!(want[1], next_authorities[1]); + + init_block(2); + + let next_authorities = 
Beefy::next_authorities(); + + assert!(next_authorities.len() == 2); + assert_eq!(want[2], next_authorities[0]); + assert_eq!(want[3], next_authorities[1]); + }); +} + +#[test] +fn validator_set_at_genesis() { + let want = vec![mock_beefy_id(1), mock_beefy_id(2)]; + + new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { + let vs = Beefy::validator_set(); + + assert_eq!(vs.id, 0u64); + assert_eq!(vs.validators[0], want[0]); + assert_eq!(vs.validators[1], want[1]); + }); +} + +#[test] +fn validator_set_updates_work() { + let want = vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)]; + + new_test_ext(vec![1, 2, 3, 4]).execute_with(|| { + init_block(1); + + let vs = Beefy::validator_set(); + + assert_eq!(vs.id, 0u64); + assert_eq!(want[0], vs.validators[0]); + assert_eq!(want[1], vs.validators[1]); + + init_block(2); + + let vs = Beefy::validator_set(); + + assert_eq!(vs.id, 1u64); + assert_eq!(want[2], vs.validators[0]); + assert_eq!(want[3], vs.validators[1]); + }); +} diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index ea690d966c979..0600b934d44e2 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Macro for benchmarking a FRAME runtime." readme = "README.md" @@ -28,7 +28,7 @@ frame-system = { version = "4.0.0-dev", default-features = false, path = "../sys log = { version = "0.4.14", default-features = false } [dev-dependencies] -hex-literal = "0.3.1" +hex-literal = "0.3.3" [features] default = ["std"] diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 6c124a8a75761..1805424426f6e 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -131,6 +131,13 @@ macro_rules! 
whitelist { /// let c = 0 .. 10 => setup_c_in_some_other_way(&caller, c); /// }: baz(Origin::Signed(caller)) /// +/// // You may optionally specify the origin type if it can't be determined automatically like +/// // this. +/// baz3 { +/// let caller = account::(b"caller", 0, benchmarks_seed); +/// let l in 1 .. MAX_LENGTH => initialize_l(l); +/// }: baz(Origin::Signed(caller), vec![0u8; l]) +/// /// // this is benchmarking some code that is not a dispatchable. /// populate_a_set { /// let x in 0 .. 10_000; @@ -148,6 +155,12 @@ macro_rules! whitelist { /// benchmark just like a regular benchmark, but only testing at the lowest and highest values for /// each component. The function will return `Ok(())` if the benchmarks return no errors. /// +/// It is also possible to generate one #[test] function per benchmark by calling the +/// `impl_benchmark_test_suite` macro inside the `benchmarks` block. The functions will be named +/// `bench_` and can be run via `cargo test`. +/// You will see one line of output per benchmark. This approach will give you more understandable +/// error messages and allows for parallel benchmark execution. +/// /// You can optionally add a `verify` code block at the end of a benchmark to test any final state /// of your benchmark in a unit test. For example: /// @@ -167,7 +180,8 @@ macro_rules! whitelist { /// /// These `verify` blocks will not affect your benchmark results! /// -/// You can construct benchmark tests like so: +/// You can construct benchmark by using the `impl_benchmark_test_suite` macro or +/// by manually implementing them like so: /// /// ```ignore /// #[test] @@ -186,6 +200,7 @@ macro_rules! benchmarks { $( $rest:tt )* ) => { $crate::benchmarks_iter!( + { } { } { } ( ) @@ -205,6 +220,7 @@ macro_rules! benchmarks_instance { $( $rest:tt )* ) => { $crate::benchmarks_iter!( + { } { I: Instance } { } ( ) @@ -224,6 +240,7 @@ macro_rules! 
benchmarks_instance_pallet { $( $rest:tt )* ) => { $crate::benchmarks_iter!( + { } { I: 'static } { } ( ) @@ -237,8 +254,60 @@ macro_rules! benchmarks_instance_pallet { #[macro_export] #[doc(hidden)] macro_rules! benchmarks_iter { + // detect and extract `impl_benchmark_test_suite` call: + // - with a semi-colon + ( + { } + { $( $instance:ident: $instance_bound:tt )? } + { $( $where_clause:tt )* } + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + impl_benchmark_test_suite!( + $bench_module:ident, + $new_test_ext:expr, + $test:path + $(, $( $args:tt )* )?); + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + { $bench_module, $new_test_ext, $test $(, $( $args )* )? } + { $( $instance: $instance_bound )? } + { $( $where_clause )* } + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) + $( $rest )* + } + }; + // - without a semicolon + ( + { } + { $( $instance:ident: $instance_bound:tt )? } + { $( $where_clause:tt )* } + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + impl_benchmark_test_suite!( + $bench_module:ident, + $new_test_ext:expr, + $test:path + $(, $( $args:tt )* )?) + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + { $bench_module, $new_test_ext, $test $(, $( $args )* )? } + { $( $instance: $instance_bound )? } + { $( $where_clause )* } + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) + $( $rest )* + } + }; // detect and extract where clause: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -248,6 +317,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound)? } { $( $where_bound )* } ( $( $names )* ) @@ -258,6 +328,7 @@ macro_rules! 
benchmarks_iter { }; // detect and extract `#[skip_meta]` tag: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -268,6 +339,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -277,8 +349,9 @@ macro_rules! benchmarks_iter { $( $rest )* } }; - // detect and extract `#[extra] tag: + // detect and extract `#[extra]` tag: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -289,6 +362,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -300,39 +374,43 @@ macro_rules! benchmarks_iter { }; // mutation arm: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) // This contains $( $( { $instance } )? $name:ident )* ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) + $name:ident { $( $code:tt )* }: _ $(< $origin_type:ty>)? ( $origin:expr $( , $arg:expr )* ) verify $postcode:block $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) ( $( $names_extra )* ) ( $( $names_skip_meta )* ) - $name { $( $code )* }: $name ( $origin $( , $arg )* ) + $name { $( $code )* }: $name $(< $origin_type >)? 
( $origin $( , $arg )* ) verify $postcode $( $rest )* } }; // mutation arm: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ( $( $names_skip_meta:tt )* ) - $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) + $name:ident { $( $code:tt )* }: $dispatch:ident $(<$origin_type:ty>)? ( $origin:expr $( , $arg:expr )* ) verify $postcode:block $( $rest:tt )* ) => { $crate::paste::paste! { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -350,15 +428,14 @@ macro_rules! benchmarks_iter { &__call ); }: { - let call_decoded = < + let __call_decoded = < Call as $crate::frame_support::codec::Decode >::decode(&mut &__benchmarked_call_encoded[..]) .expect("call is encoded above, encoding must be correct"); - - < - Call as $crate::frame_support::traits::UnfilteredDispatchable - >::dispatch_bypass_filter(call_decoded, $origin.into())?; + let __origin = $crate::to_origin!($origin $(, $origin_type)?); + as $crate::frame_support::traits::UnfilteredDispatchable + >::dispatch_bypass_filter(__call_decoded, __origin)?; } verify $postcode $( $rest )* @@ -367,6 +444,7 @@ macro_rules! benchmarks_iter { }; // iteration arm: ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -394,6 +472,7 @@ macro_rules! benchmarks_iter { ); $crate::benchmarks_iter!( + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* { $( $instance )? } $name ) @@ -402,8 +481,40 @@ macro_rules! 
benchmarks_iter { $( $rest )* ); }; - // iteration-exit arm + // iteration-exit arm which generates a #[test] function for each case. ( + { $bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )? } + { $( $instance:ident: $instance_bound:tt )? } + { $( $where_clause:tt )* } + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + ) => { + $crate::selected_benchmark!( + { $( $where_clause)* } + { $( $instance: $instance_bound )? } + $( $names )* + ); + $crate::impl_benchmark!( + { $( $where_clause )* } + { $( $instance: $instance_bound )? } + ( $( $names )* ) + ( $( $names_extra ),* ) + ( $( $names_skip_meta ),* ) + ); + $crate::impl_test_function!( + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) + $bench_module, + $new_test_ext, + $test + $(, $( $args )* )? + ); + }; + // iteration-exit arm which doesn't generate a #[test] function for all cases. + ( + { } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -425,6 +536,7 @@ macro_rules! benchmarks_iter { }; // add verify block to _() format ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -434,6 +546,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -446,6 +559,7 @@ macro_rules! benchmarks_iter { }; // add verify block to name() format ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -455,6 +569,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? 
} { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -467,6 +582,7 @@ macro_rules! benchmarks_iter { }; // add verify block to {} format ( + { $($bench_module:ident, $new_test_ext:expr, $test:path $(, $( $args:tt )* )?)? } { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) @@ -476,6 +592,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter!( + { $($bench_module, $new_test_ext, $test $(, $( $args )* )?)? } { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) @@ -488,6 +605,17 @@ macro_rules! benchmarks_iter { }; } +#[macro_export] +#[doc(hidden)] +macro_rules! to_origin { + ($origin:expr) => { + $origin.into() + }; + ($origin:expr, $origin_type:ty) => { + >::from($origin) + }; +} + #[macro_export] #[doc(hidden)] macro_rules! benchmark_backend { @@ -678,6 +806,100 @@ macro_rules! benchmark_backend { }; } +// Creates #[test] functions for the given bench cases. +#[macro_export] +#[doc(hidden)] +macro_rules! impl_bench_case_tests { + ( + { $module:ident, $new_test_exec:expr, $exec_name:ident, $test:path, $extra:expr } + { $( $names_extra:tt )* } + $( { $( $bench_inst:ident )? } $bench:ident )* + ) + => { + $crate::impl_bench_name_tests!( + $module, $new_test_exec, $exec_name, $test, $extra, + { $( $names_extra )* }, + $( { $bench } )+ + ); + } +} + +// Creates a #[test] function for the given bench name. +#[macro_export] +#[doc(hidden)] +macro_rules! impl_bench_name_tests { + // recursion anchor + ( + $module:ident, $new_test_exec:expr, $exec_name:ident, $test:path, $extra:expr, + { $( $names_extra:tt )* }, + { $name:ident } + ) => { + $crate::paste::paste! { + #[test] + fn [] () { + $new_test_exec.$exec_name(|| { + // Skip all #[extra] benchmarks if $extra is false. 
+ if !($extra) { + let disabled = $crate::vec![ $( stringify!($names_extra).as_ref() ),* ]; + if disabled.contains(&stringify!($name)) { + $crate::log::error!( + "INFO: extra benchmark skipped - {}", + stringify!($name), + ); + return (); + } + } + + // Same per-case logic as when all cases are run in the + // same function. + match std::panic::catch_unwind(|| { + $module::<$test>::[< test_benchmark_ $name >] () + }) { + Err(err) => { + panic!("{}: {:?}", stringify!($name), err); + }, + Ok(Err(err)) => { + match err { + $crate::BenchmarkError::Stop(err) => { + panic!("{}: {:?}", stringify!($name), err); + }, + $crate::BenchmarkError::Override(_) => { + // This is still considered a success condition. + $crate::log::error!( + "WARNING: benchmark error overrided - {}", + stringify!($name), + ); + }, + $crate::BenchmarkError::Skip => { + // This is considered a success condition. + $crate::log::error!( + "WARNING: benchmark error skipped - {}", + stringify!($name), + ); + } + } + }, + Ok(Ok(())) => (), + } + }); + } + } + }; + // recursion tail + ( + $module:ident, $new_test_exec:expr, $exec_name:ident, $test:path, $extra:expr, + { $( $names_extra:tt )* }, + { $name:ident } $( { $rest:ident } )+ + ) => { + // car + $crate::impl_bench_name_tests!($module, $new_test_exec, $exec_name, $test, $extra, + { $( $names_extra )* }, { $name }); + // cdr + $crate::impl_bench_name_tests!($module, $new_test_exec, $exec_name, $test, $extra, + { $( $names_extra )* }, $( { $rest } )+); + }; +} + // Creates a `SelectedBenchmark` enum implementing `BenchmarkingSetup`. // // Every variant must implement [`BenchmarkingSetup`]. @@ -1013,13 +1235,54 @@ macro_rules! 
impl_benchmark_test { /// new_test_ext().execute_with(|| { /// assert_ok!(test_benchmark_accumulate_dummy::()); /// assert_ok!(test_benchmark_set_dummy::()); -/// assert_ok!(test_benchmark_another_set_dummy::()); /// assert_ok!(test_benchmark_sort_vector::()); /// }); /// } /// } /// ``` /// +/// When called inside the `benchmarks` macro of the `pallet_example` as +/// +/// ```rust,ignore +/// benchmarks! { +/// // Benchmarks omitted for brevity +/// +/// impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); +/// } +/// ``` +/// +/// It expands to the equivalent of: +/// +/// ```rust,ignore +/// #[cfg(test)] +/// mod benchmarking { +/// use super::*; +/// use crate::tests::{new_test_ext, Test}; +/// use frame_support::assert_ok; +/// +/// #[test] +/// fn bench_accumulate_dummy() { +/// new_test_ext().execute_with(|| { +/// assert_ok!(test_benchmark_accumulate_dummy::()); +/// } +/// } +/// +/// #[test] +/// fn bench_set_dummy() { +/// new_test_ext().execute_with(|| { +/// assert_ok!(test_benchmark_set_dummy::()); +/// } +/// } +/// +/// #[test] +/// fn bench_sort_vector() { +/// new_test_ext().execute_with(|| { +/// assert_ok!(test_benchmark_sort_vector::()); +/// } +/// } +/// } +/// ``` +/// /// ## Arguments /// /// The first argument, `module`, must be the path to this crate's module. @@ -1092,16 +1355,50 @@ macro_rules! impl_benchmark_test { // just iterate over the `Benchmarking::benchmarks` list to run the actual implementations. #[macro_export] macro_rules! impl_benchmark_test_suite { + ( + $bench_module:ident, + $new_test_ext:expr, + $test:path + $(, $( $rest:tt )* )? + ) => { + $crate::impl_test_function!( + () + () + () + $bench_module, + $new_test_ext, + $test + $(, $( $rest )* )? + ); + } +} + +// Takes all arguments from `impl_benchmark_test_suite` and three additional arguments. +// +// Can be configured to generate one #[test] fn per bench case or +// one #[test] fn for all bench cases. 
+// This depends on whether or not the first argument contains a non-empty list of bench names. +#[macro_export] +#[doc(hidden)] +macro_rules! impl_test_function { // user might or might not have set some keyword arguments; set the defaults // // The weird syntax indicates that `rest` comes only after a comma, which is otherwise optional ( + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + $bench_module:ident, $new_test_ext:expr, $test:path $(, $( $rest:tt )* )? ) => { - $crate::impl_benchmark_test_suite!( + $crate::impl_test_function!( + @cases: + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) @selected: $bench_module, $new_test_ext, @@ -1115,6 +1412,10 @@ macro_rules! impl_benchmark_test_suite { }; // pick off the benchmarks_path keyword argument ( + @cases: + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) @selected: $bench_module:ident, $new_test_ext:expr, @@ -1126,7 +1427,11 @@ macro_rules! impl_benchmark_test_suite { benchmarks_path = $benchmarks_path:ident $(, $( $rest:tt )* )? ) => { - $crate::impl_benchmark_test_suite!( + $crate::impl_test_function!( + @cases: + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) @selected: $bench_module, $new_test_ext, @@ -1140,6 +1445,10 @@ macro_rules! impl_benchmark_test_suite { }; // pick off the extra keyword argument ( + @cases: + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) @selected: $bench_module:ident, $new_test_ext:expr, @@ -1151,7 +1460,11 @@ macro_rules! impl_benchmark_test_suite { extra = $extra:expr $(, $( $rest:tt )* )? ) => { - $crate::impl_benchmark_test_suite!( + $crate::impl_test_function!( + @cases: + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) @selected: $bench_module, $new_test_ext, @@ -1165,6 +1478,10 @@ macro_rules! 
impl_benchmark_test_suite { }; // pick off the exec_name keyword argument ( + @cases: + ( $( $names:tt )* ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) @selected: $bench_module:ident, $new_test_ext:expr, @@ -1176,7 +1493,11 @@ macro_rules! impl_benchmark_test_suite { exec_name = $exec_name:ident $(, $( $rest:tt )* )? ) => { - $crate::impl_benchmark_test_suite!( + $crate::impl_test_function!( + @cases: + ( $( $names )* ) + ( $( $names_extra )* ) + ( $( $names_skip_meta )* ) @selected: $bench_module, $new_test_ext, @@ -1188,8 +1509,34 @@ macro_rules! impl_benchmark_test_suite { $( $( $rest )* )? ); }; - // all options set; nothing else in user-provided keyword arguments + // iteration-exit arm which generates a #[test] function for each case. + ( + @cases: + ( $( $names:tt )+ ) + ( $( $names_extra:tt )* ) + ( $( $names_skip_meta:tt )* ) + @selected: + $bench_module:ident, + $new_test_ext:expr, + $test:path, + benchmarks_path = $path_to_benchmarks_invocation:ident, + extra = $extra:expr, + exec_name = $exec_name:ident, + @user: + $(,)? + ) => { + $crate::impl_bench_case_tests!( + { $bench_module, $new_test_ext, $exec_name, $test, $extra } + { $( $names_extra:tt )* } + $($names)+ + ); + }; + // iteration-exit arm which generates one #[test] function for all cases. 
( + @cases: + () + () + () @selected: $bench_module:ident, $new_test_ext:expr, diff --git a/frame/benchmarking/src/tests_instance.rs b/frame/benchmarking/src/tests_instance.rs index caccebd39c70b..0ad156ce5a88d 100644 --- a/frame/benchmarking/src/tests_instance.rs +++ b/frame/benchmarking/src/tests_instance.rs @@ -173,11 +173,11 @@ mod benchmarks { } verify { ensure!(m[0] == 0, "You forgot to sort!") } - } - crate::impl_benchmark_test_suite!( - Pallet, - crate::tests_instance::new_test_ext(), - crate::tests_instance::Test - ); + impl_benchmark_test_suite!( + Pallet, + crate::tests_instance::new_test_ext(), + crate::tests_instance::Test + ) + } } diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 158f5c5b57573..c24ad2f64e18d 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -16,7 +16,6 @@ // limitations under the License. //! Interfaces, types and utils for benchmarking a FRAME runtime. - use codec::{Decode, Encode}; use frame_support::{ dispatch::{DispatchError, DispatchErrorWithPostInfo}, diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml index 93a7ababb2ebd..d949d0fb1d58e 100644 --- a/frame/bounties/Cargo.toml +++ b/frame/bounties/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet to manage bounties" readme = "README.md" diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index 1aa1eabdb5177..33af02fbb9ea0 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_system::RawOrigin; use 
sp_runtime::traits::Bounded; @@ -209,6 +209,6 @@ benchmarks! { ensure!(missed_any == false, "Missed some"); assert_last_event::(Event::BountyBecameActive(b - 1).into()) } -} -impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Bounties, crate::tests::new_test_ext(), crate::tests::Test) +} diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index e88f28d417730..3e3d167522e81 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Collective system: Members of a set of account IDs can make their collective feelings known through dispatched calls from one of two specialized origins." readme = "README.md" diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index c7e695babf27d..c26a2b43f5b75 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -23,9 +23,7 @@ use crate::Pallet as Collective; use sp_runtime::traits::Bounded; use sp_std::mem::size_of; -use frame_benchmarking::{ - account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelisted_caller, -}; +use frame_benchmarking::{account, benchmarks_instance_pallet, whitelisted_caller}; use frame_system::{Call as SystemCall, Pallet as System, RawOrigin as SystemOrigin}; const SEED: u32 = 0; @@ -638,6 +636,6 @@ benchmarks_instance_pallet! 
{ assert_eq!(Collective::::proposals().len(), (p - 1) as usize); assert_last_event::(Event::Disapproved(last_hash).into()); } -} -impl_benchmark_test_suite!(Collective, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Collective, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 80dc0b05e7511..5967600bf68f5 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for WASM contracts" readme = "README.md" @@ -28,7 +28,7 @@ smallvec = { version = "1", default-features = false, features = [ wasmi-validation = { version = "0.4", default-features = false } # Only used in benchmarking to generate random contract code -libsecp256k1 = { version = "0.3.5", optional = true, default-features = false, features = ["hmac"] } +libsecp256k1 = { version = "0.6.0", optional = true, default-features = false, features = ["hmac", "static-context"] } rand = { version = "0.7.3", optional = true, default-features = false } rand_pcg = { version = "0.2", optional = true } @@ -47,7 +47,7 @@ sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primit [dev-dependencies] assert_matches = "1" hex-literal = "0.3" -pretty_assertions = "0.7" +pretty_assertions = "1.0.0" wat = "1" # Substrate Dependencies diff --git a/frame/contracts/README.md b/frame/contracts/README.md index f3a8d13f6e77d..978ee25fcb109 100644 --- a/frame/contracts/README.md +++ b/frame/contracts/README.md @@ -34,7 +34,7 @@ reverted at the current call's contract level. For example, if contract A calls then all of B's calls are reverted. 
Assuming correct error handling by contract A, A's other calls and state changes still persist. -One gas is equivalent to one [weight](https://substrate.dev/docs/en/knowledgebase/learn-substrate/weight) +One gas is equivalent to one [weight](https://docs.substrate.io/v3/runtime/weights-and-fees) which is defined as one picosecond of execution time on the runtime's reference machine. ### Notable Scenarios diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index b441d88453ae2..48baf23d3aee8 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "A crate that hosts a common definitions that are relevant for the pallet-contracts." readme = "README.md" diff --git a/frame/contracts/proc-macro/Cargo.toml b/frame/contracts/proc-macro/Cargo.toml index 605c69fe73e25..63d05bcc2f05e 100644 --- a/frame/contracts/proc-macro/Cargo.toml +++ b/frame/contracts/proc-macro/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Procedural macros used in pallet_contracts" diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index b73039ba7191e..eec02b03aaa0e 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Node-specific RPC methods for interaction with 
contracts." readme = "README.md" diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index e5f6d1ec7eb8e..e41aa5aaec9b5 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Runtime API definition required by Contracts RPC extensions." readme = "README.md" diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index db657e618322e..5c753c2d95558 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -36,7 +36,7 @@ use crate::{ Pallet as Contracts, *, }; use codec::Encode; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::weights::Weight; use frame_system::RawOrigin; use pwasm_utils::parity_wasm::elements::{BlockType, BrTableData, Instruction, ValueType}; @@ -1425,7 +1425,7 @@ benchmarks! { let message_hash = sp_io::hashing::blake2_256("Hello world".as_bytes()); let signatures = (0..r * API_BENCHMARK_BATCH_SIZE) .map(|i| { - use secp256k1::{SecretKey, Message, sign}; + use libsecp256k1::{SecretKey, Message, sign}; let private_key = SecretKey::random(&mut rng); let (signature, recovery_id) = sign(&Message::parse(&message_hash), &private_key); @@ -2241,7 +2241,7 @@ benchmarks! { ); } #[cfg(not(feature = "std"))] - return Err("Run this bench with a native runtime in order to see the schedule.".into()); + Err("Run this bench with a native runtime in order to see the schedule.")?; }: {} // Execute one erc20 transfer using the ink! erc20 example contract. @@ -2325,10 +2325,10 @@ benchmarks! 
{ ) .result?; } -} -impl_benchmark_test_suite!( - Contracts, - crate::tests::ExtBuilder::default().build(), - crate::tests::Test, -); + impl_benchmark_test_suite!( + Contracts, + crate::tests::ExtBuilder::default().build(), + crate::tests::Test, + ) +} diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index cc468466c2922..7fa0b0b274449 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -660,7 +660,10 @@ where } // Deposit an instantiation event. - deposit_event::(vec![], Event::Instantiated(self.caller().clone(), account_id)); + deposit_event::( + vec![], + Event::Instantiated { deployer: self.caller().clone(), contract: account_id }, + ); } Ok(output) @@ -942,10 +945,10 @@ where )?; ContractInfoOf::::remove(&frame.account_id); E::remove_user(info.code_hash, &mut frame.nested_meter)?; - Contracts::::deposit_event(Event::Terminated( - frame.account_id.clone(), - beneficiary.clone(), - )); + Contracts::::deposit_event(Event::Terminated { + contract: frame.account_id.clone(), + beneficiary: beneficiary.clone(), + }); Ok(()) } @@ -997,7 +1000,7 @@ where fn deposit_event(&mut self, topics: Vec, data: Vec) { deposit_event::( topics, - Event::ContractEmitted(self.top_frame().account_id.clone(), data), + Event::ContractEmitted { contract: self.top_frame().account_id.clone(), data }, ); } @@ -1662,7 +1665,10 @@ mod tests { Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch ); - assert_eq!(&events(), &[Event::Instantiated(ALICE, instantiated_contract_address)]); + assert_eq!( + &events(), + &[Event::Instantiated { deployer: ALICE, contract: instantiated_contract_address }] + ); }); } @@ -1751,7 +1757,10 @@ mod tests { Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch ); - assert_eq!(&events(), &[Event::Instantiated(BOB, instantiated_contract_address)]); + assert_eq!( + &events(), + &[Event::Instantiated { deployer: BOB, contract: instantiated_contract_address }] + ); }); } diff --git 
a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 77efcc6986e64..62b74b9b7b954 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -104,7 +104,7 @@ pub use crate::{ schedule::{HostFnWeights, InstructionWeights, Limits, Schedule}, }; use crate::{ - exec::{Executable, Stack as ExecStack}, + exec::{AccountIdOf, ExecError, Executable, Stack as ExecStack}, gas::GasMeter, storage::{ContractInfo, DeletedContract, Storage}, wasm::PrefabWasmModule, @@ -112,13 +112,14 @@ use crate::{ }; use frame_support::{ dispatch::Dispatchable, + ensure, traits::{Contains, Currency, Get, Randomness, StorageVersion, Time}, weights::{GetDispatchInfo, PostDispatchInfo, Weight}, }; use frame_system::Pallet as System; use pallet_contracts_primitives::{ - Code, ContractAccessError, ContractExecResult, ContractInstantiateResult, GetStorageResult, - InstantiateReturnValue, + Code, ContractAccessError, ContractExecResult, ContractInstantiateResult, ExecReturnValue, + GetStorageResult, InstantiateReturnValue, }; use sp_core::{crypto::UncheckedFrom, Bytes}; use sp_runtime::traits::{Convert, Hash, Saturating, StaticLookup}; @@ -272,18 +273,8 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; - let mut gas_meter = GasMeter::new(gas_limit); - let schedule = T::Schedule::get(); - let result = ExecStack::>::run_call( - origin, - dest, - &mut gas_meter, - &schedule, - value, - data, - None, - ); - gas_meter.into_dispatch_result(result, T::WeightInfo::call()) + let output = Self::internal_call(origin, dest, value, gas_limit, data, None); + output.gas_meter.into_dispatch_result(output.result, T::WeightInfo::call()) } /// Instantiates a new contract from the supplied `code` optionally transferring @@ -325,26 +316,19 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; let code_len = code.len() as u32; - ensure!(code_len <= T::Schedule::get().limits.code_len, 
Error::::CodeTooLarge); - let mut gas_meter = GasMeter::new(gas_limit); - let schedule = T::Schedule::get(); - let executable = PrefabWasmModule::from_code(code, &schedule)?; - let code_len = executable.code_len(); - ensure!(code_len <= T::Schedule::get().limits.code_len, Error::::CodeTooLarge); - let result = ExecStack::>::run_instantiate( + let salt_len = salt.len() as u32; + let output = Self::internal_instantiate( origin, - executable, - &mut gas_meter, - &schedule, endowment, + gas_limit, + Code::Upload(Bytes(code)), data, - &salt, + salt, None, - ) - .map(|(_address, output)| output); - gas_meter.into_dispatch_result( - result, - T::WeightInfo::instantiate_with_code(code_len / 1024, salt.len() as u32 / 1024), + ); + output.gas_meter.into_dispatch_result( + output.result.map(|(_address, result)| result), + T::WeightInfo::instantiate_with_code(code_len / 1024, salt_len / 1024), ) } @@ -365,71 +349,64 @@ pub mod pallet { salt: Vec, ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; - let mut gas_meter = GasMeter::new(gas_limit); - let schedule = T::Schedule::get(); - let executable = PrefabWasmModule::from_storage(code_hash, &schedule, &mut gas_meter)?; - let result = ExecStack::>::run_instantiate( + let salt_len = salt.len() as u32; + let output = Self::internal_instantiate( origin, - executable, - &mut gas_meter, - &schedule, endowment, + gas_limit, + Code::Existing(code_hash), data, - &salt, + salt, None, + ); + output.gas_meter.into_dispatch_result( + output.result.map(|(_address, output)| output), + T::WeightInfo::instantiate(salt_len / 1024), ) - .map(|(_address, output)| output); - gas_meter - .into_dispatch_result(result, T::WeightInfo::instantiate(salt.len() as u32 / 1024)) } } #[pallet::event] #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { - /// Contract deployed by address at the specified address. 
\[deployer, contract\] - Instantiated(T::AccountId, T::AccountId), + /// Contract deployed by address at the specified address. + Instantiated { deployer: T::AccountId, contract: T::AccountId }, /// Contract has been removed. - /// \[contract, beneficiary\] - /// - /// # Params - /// - /// - `contract`: The contract that was terminated. - /// - `beneficiary`: The account that received the contracts remaining balance. /// /// # Note /// /// The only way for a contract to be removed and emitting this event is by calling /// `seal_terminate`. - Terminated(T::AccountId, T::AccountId), + Terminated { + /// The contract that was terminated. + contract: T::AccountId, + /// The account that received the contract's remaining balance + beneficiary: T::AccountId, + }, - /// Code with the specified hash has been stored. \[code_hash\] - CodeStored(T::Hash), + /// Code with the specified hash has been stored. + CodeStored { code_hash: T::Hash }, /// Triggered when the current schedule is updated. - /// \[version\] - /// - /// # Params - /// - /// - `version`: The version of the newly set schedule. - ScheduleUpdated(u32), + ScheduleUpdated { + /// The version of the newly set schedule. + version: u32, + }, /// A custom event emitted by the contract. - /// \[contract, data\] - /// - /// # Params - /// - /// - `contract`: The contract that emitted the event. - /// - `data`: Data supplied by the contract. Metadata generated during contract compilation - /// is needed to decode it. - ContractEmitted(T::AccountId, Vec), + ContractEmitted { + /// The contract that emitted the event. + contract: T::AccountId, + /// Data supplied by the contract. Metadata generated during contract compilation + /// is needed to decode it. + data: Vec, + }, /// A code with the specified hash was removed. - /// \[code_hash\] /// /// This happens when the last contract that uses this code hash was removed. 
- CodeRemoved(T::Hash), + CodeRemoved { code_hash: T::Hash }, } #[pallet::error] @@ -535,6 +512,20 @@ pub mod pallet { pub(crate) type DeletionQueue = StorageValue<_, Vec, ValueQuery>; } +/// Return type of the private [`Pallet::internal_call`] function. +type InternalCallOutput = InternalOutput; + +/// Return type of the private [`Pallet::internal_instantiate`] function. +type InternalInstantiateOutput = InternalOutput, ExecReturnValue)>; + +/// Return type of private helper functions. +struct InternalOutput { + /// The gas meter that was used to execute the call. + gas_meter: GasMeter, + /// The result of the call. + result: Result, +} + impl Pallet where T::AccountId: UncheckedFrom + AsRef<[u8]>, @@ -556,25 +547,16 @@ where dest: T::AccountId, value: BalanceOf, gas_limit: Weight, - input_data: Vec, + data: Vec, debug: bool, ) -> ContractExecResult { - let mut gas_meter = GasMeter::new(gas_limit); - let schedule = T::Schedule::get(); let mut debug_message = if debug { Some(Vec::new()) } else { None }; - let result = ExecStack::>::run_call( - origin, - dest, - &mut gas_meter, - &schedule, - value, - input_data, - debug_message.as_mut(), - ); + let output = + Self::internal_call(origin, dest, value, gas_limit, data, debug_message.as_mut()); ContractExecResult { - result: result.map_err(|r| r.error), - gas_consumed: gas_meter.gas_consumed(), - gas_required: gas_meter.gas_required(), + result: output.result.map_err(|r| r.error), + gas_consumed: output.gas_meter.gas_consumed(), + gas_required: output.gas_meter.gas_required(), debug_message: debug_message.unwrap_or_default(), } } @@ -601,38 +583,23 @@ where salt: Vec, debug: bool, ) -> ContractInstantiateResult { - let mut gas_meter = GasMeter::new(gas_limit); - let schedule = T::Schedule::get(); - let executable = match code { - Code::Upload(Bytes(binary)) => PrefabWasmModule::from_code(binary, &schedule), - Code::Existing(hash) => PrefabWasmModule::from_storage(hash, &schedule, &mut gas_meter), - }; - let executable 
= match executable { - Ok(executable) => executable, - Err(error) => - return ContractInstantiateResult { - result: Err(error.into()), - gas_consumed: gas_meter.gas_consumed(), - gas_required: gas_meter.gas_required(), - debug_message: Vec::new(), - }, - }; let mut debug_message = if debug { Some(Vec::new()) } else { None }; - let result = ExecStack::>::run_instantiate( + let output = Self::internal_instantiate( origin, - executable, - &mut gas_meter, - &schedule, endowment, + gas_limit, + code, data, - &salt, + salt, debug_message.as_mut(), - ) - .and_then(|(account_id, result)| Ok(InstantiateReturnValue { result, account_id })); + ); ContractInstantiateResult { - result: result.map_err(|e| e.error), - gas_consumed: gas_meter.gas_consumed(), - gas_required: gas_meter.gas_required(), + result: output + .result + .map(|(account_id, result)| InstantiateReturnValue { result, account_id }) + .map_err(|e| e.error), + gas_consumed: output.gas_meter.gas_consumed(), + gas_required: output.gas_meter.gas_required(), debug_message: debug_message.unwrap_or_default(), } } @@ -709,4 +676,74 @@ where ) -> frame_support::dispatch::DispatchResult { self::wasm::reinstrument(module, schedule) } + + /// Internal function that does the actual call. + /// + /// Called by dispatchables and public functions. + fn internal_call( + origin: T::AccountId, + dest: T::AccountId, + value: BalanceOf, + gas_limit: Weight, + data: Vec, + debug_message: Option<&mut Vec>, + ) -> InternalCallOutput { + let mut gas_meter = GasMeter::new(gas_limit); + let schedule = T::Schedule::get(); + let result = ExecStack::>::run_call( + origin, + dest, + &mut gas_meter, + &schedule, + value, + data, + debug_message, + ); + InternalCallOutput { gas_meter, result } + } + + /// Internal function that does the actual instantiation. + /// + /// Called by dispatchables and public functions. 
+ fn internal_instantiate( + origin: T::AccountId, + endowment: BalanceOf, + gas_limit: Weight, + code: Code>, + data: Vec, + salt: Vec, + debug_message: Option<&mut Vec>, + ) -> InternalInstantiateOutput { + let mut gas_meter = GasMeter::new(gas_limit); + let schedule = T::Schedule::get(); + let try_exec = || { + let executable = match code { + Code::Upload(Bytes(binary)) => { + ensure!( + binary.len() as u32 <= schedule.limits.code_len, + >::CodeTooLarge + ); + let executable = PrefabWasmModule::from_code(binary, &schedule)?; + ensure!( + executable.code_len() <= schedule.limits.code_len, + >::CodeTooLarge + ); + executable + }, + Code::Existing(hash) => + PrefabWasmModule::from_storage(hash, &schedule, &mut gas_meter)?, + }; + ExecStack::>::run_instantiate( + origin, + executable, + &mut gas_meter, + &schedule, + endowment, + data, + &salt, + debug_message, + ) + }; + InternalInstantiateOutput { result: try_exec(), gas_meter } + } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index f5b95c192c42e..310c1d4cb2dd9 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -444,6 +444,11 @@ fn instantiate_and_call_and_deposit_event() { assert_eq!( System::events(), vec![ + EventRecord { + phase: Phase::Initialization, + event: Event::Balances(pallet_balances::Event::Deposit(ALICE, 1_000_000)), + topics: vec![], + }, EventRecord { phase: Phase::Initialization, event: Event::System(frame_system::Event::NewAccount(ALICE.clone())), @@ -478,20 +483,25 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::CodeStored(code_hash.into())), + event: Event::Contracts(crate::Event::CodeStored { + code_hash: code_hash.into() + }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::ContractEmitted( - addr.clone(), - vec![1, 2, 3, 4] - )), + event: Event::Contracts(crate::Event::ContractEmitted { + 
contract: addr.clone(), + data: vec![1, 2, 3, 4] + }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Instantiated(ALICE, addr.clone())), + event: Event::Contracts(crate::Event::Instantiated { + deployer: ALICE, + contract: addr.clone() + }), topics: vec![], }, ] @@ -764,12 +774,15 @@ fn self_destruct_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::CodeRemoved(code_hash)), + event: Event::Contracts(crate::Event::CodeRemoved { code_hash }), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::Contracts(crate::Event::Terminated(addr.clone(), DJANGO)), + event: Event::Contracts(crate::Event::Terminated { + contract: addr.clone(), + beneficiary: DJANGO + }), topics: vec![], }, ], diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 08a7449683ed6..afb68d4d81179 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -59,7 +59,7 @@ where Some(module) => increment_64(&mut module.refcount), None => { *existing = Some(prefab_module); - Contracts::::deposit_event(Event::CodeStored(code_hash)) + Contracts::::deposit_event(Event::CodeStored { code_hash }) }, }); } @@ -170,7 +170,7 @@ where T::AccountId: UncheckedFrom + AsRef<[u8]>, { >::remove(code_hash); - Contracts::::deposit_event(Event::CodeRemoved(code_hash)) + Contracts::::deposit_event(Event::CodeRemoved { code_hash }) } /// Increment the refcount panicking if it should ever overflow (which will not happen). 
diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index 94719553e28aa..f0bf2109be065 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for democracy" readme = "README.md" diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 7d4d7aee140b9..34bcb0da301e6 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -19,9 +19,10 @@ use super::*; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelist_account}; +use frame_benchmarking::{account, benchmarks, whitelist_account}; use frame_support::{ assert_noop, assert_ok, + codec::Decode, traits::{ schedule::DispatchTime, Currency, EnsureOrigin, Get, OnInitialize, UnfilteredDispatchable, }, @@ -69,7 +70,7 @@ fn add_referendum(n: u32) -> Result { let referendum_index: ReferendumIndex = ReferendumCount::::get() - 1; T::Scheduler::schedule_named( (DEMOCRACY_ID, referendum_index).encode(), - DispatchTime::At(1u32.into()), + DispatchTime::At(2u32.into()), None, 63, frame_system::RawOrigin::Root.into(), @@ -194,9 +195,8 @@ benchmarks! { emergency_cancel { let origin = T::CancellationOrigin::successful_origin(); let referendum_index = add_referendum::(0)?; - let call = Call::::emergency_cancel { ref_index: referendum_index }; assert_ok!(Democracy::::referendum_status(referendum_index)); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, referendum_index) verify { // Referendum has been canceled assert_noop!( @@ -219,14 +219,11 @@ benchmarks! 
{ assert_ok!( Democracy::::external_propose(T::ExternalOrigin::successful_origin(), hash.clone()) ); - + let origin = T::BlacklistOrigin::successful_origin(); // Add a referendum of our proposal. let referendum_index = add_referendum::(0)?; assert_ok!(Democracy::::referendum_status(referendum_index)); - - let call = Call::::blacklist { proposal_hash: hash, maybe_ref_index: Some(referendum_index) }; - let origin = T::BlacklistOrigin::successful_origin(); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, hash, Some(referendum_index)) verify { // Referendum has been canceled assert_noop!( @@ -246,9 +243,7 @@ benchmarks! { proposal_hash, (T::BlockNumber::zero(), vec![T::AccountId::default(); v as usize]) ); - - let call = Call::::external_propose { proposal_hash }; - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -257,8 +252,7 @@ benchmarks! { external_propose_majority { let origin = T::ExternalMajorityOrigin::successful_origin(); let proposal_hash = T::Hashing::hash_of(&0); - let call = Call::::external_propose_majority { proposal_hash }; - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -267,8 +261,7 @@ benchmarks! { external_propose_default { let origin = T::ExternalDefaultOrigin::successful_origin(); let proposal_hash = T::Hashing::hash_of(&0); - let call = Call::::external_propose_default { proposal_hash }; - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, proposal_hash) verify { // External proposal created ensure!(>::exists(), "External proposal didn't work"); @@ -283,13 +276,7 @@ benchmarks! 
{ let origin_fast_track = T::FastTrackOrigin::successful_origin(); let voting_period = T::FastTrackVotingPeriod::get(); let delay = 0u32; - let call = Call::::fast_track { - proposal_hash, - voting_period: voting_period.into(), - delay: delay.into() - }; - - }: { call.dispatch_bypass_filter(origin_fast_track)? } + }: _(origin_fast_track, proposal_hash, voting_period.into(), delay.into()) verify { assert_eq!(Democracy::::referendum_count(), 1, "referendum not created") } @@ -310,10 +297,9 @@ benchmarks! { vetoers.sort(); Blacklist::::insert(proposal_hash, (T::BlockNumber::zero(), vetoers)); - let call = Call::::veto_external { proposal_hash }; let origin = T::VetoOrigin::successful_origin(); ensure!(NextExternal::::get().is_some(), "no external proposal"); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, proposal_hash) verify { assert!(NextExternal::::get().is_none()); let (_, new_vetoers) = >::get(&proposal_hash).ok_or("no blacklist")?; @@ -436,7 +422,39 @@ benchmarks! { assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); assert_eq!(Democracy::::lowest_unbaked(), 0, "invalid referenda init"); - }: { Democracy::::on_initialize(0u32.into()) } + }: { Democracy::::on_initialize(1u32.into()) } + verify { + // All should be on going + for i in 0 .. r { + if let Some(value) = ReferendumInfoOf::::get(i) { + match value { + ReferendumInfo::Finished { .. } => return Err("Referendum has been finished".into()), + ReferendumInfo::Ongoing(_) => (), + } + } + } + } + + on_initialize_base_with_launch_period { + let r in 1 .. 
MAX_REFERENDUMS; + + for i in 0..r { + add_referendum::(i)?; + } + + for (key, mut info) in ReferendumInfoOf::::iter() { + if let ReferendumInfo::Ongoing(ref mut status) = info { + status.end += 100u32.into(); + } + ReferendumInfoOf::::insert(key, info); + } + + assert_eq!(Democracy::::referendum_count(), r, "referenda not created"); + assert_eq!(Democracy::::lowest_unbaked(), 0, "invalid referenda init"); + + let block_number = T::LaunchPeriod::get(); + + }: { Democracy::::on_initialize(block_number) } verify { // All should be on going for i in 0 .. r { @@ -774,12 +792,20 @@ benchmarks! { Some(PreimageStatus::Available { .. }) => (), _ => return Err("preimage not available".into()) } + let origin = RawOrigin::Root.into(); + let call = Call::::enact_proposal { proposal_hash, index: 0 }.encode(); }: { assert_eq!( - Democracy::::enact_proposal(RawOrigin::Root.into(), proposal_hash, 0), + as Decode>::decode(&mut &*call) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(origin), Err(Error::::PreimageInvalid.into()) ); } -} -impl_benchmark_test_suite!(Democracy, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!( + Democracy, + crate::tests::new_test_ext(), + crate::tests::Test + ); +} diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 8bc6921c4f8ad..893e4676bef7b 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -149,7 +149,7 @@ //! - `cancel_queued` - Cancels a proposal that is queued for enactment. //! - `clear_public_proposal` - Removes all public proposals. 
-#![recursion_limit = "128"] +#![recursion_limit = "256"] #![cfg_attr(not(feature = "std"), no_std)] use codec::{Decode, Encode, Input}; @@ -613,10 +613,7 @@ pub mod pallet { impl Hooks> for Pallet { /// Weight: see `begin_block` fn on_initialize(n: T::BlockNumber) -> Weight { - Self::begin_block(n).unwrap_or_else(|e| { - sp_runtime::print(e); - 0 - }) + Self::begin_block(n) } } @@ -1682,7 +1679,7 @@ impl Pallet { now: T::BlockNumber, index: ReferendumIndex, status: ReferendumStatus>, - ) -> Result { + ) -> bool { let total_issuance = T::Currency::total_issuance(); let approved = status.threshold.approved(status.tally, total_issuance); @@ -1719,44 +1716,68 @@ impl Pallet { Self::deposit_event(Event::::NotPassed(index)); } - Ok(approved) + approved } /// Current era is ending; we should finish up any proposals. /// /// /// # - /// If a referendum is launched or maturing, this will take full block weight. Otherwise: + /// If a referendum is launched or maturing, this will take full block weight if queue is not + /// empty. Otherwise: /// - Complexity: `O(R)` where `R` is the number of unbaked referenda. /// - Db reads: `LastTabledWasExternal`, `NextExternal`, `PublicProps`, `account`, /// `ReferendumCount`, `LowestUnbaked` /// - Db writes: `PublicProps`, `account`, `ReferendumCount`, `DepositOf`, `ReferendumInfoOf` /// - Db reads per R: `DepositOf`, `ReferendumInfoOf` /// # - fn begin_block(now: T::BlockNumber) -> Result { + fn begin_block(now: T::BlockNumber) -> Weight { let max_block_weight = T::BlockWeights::get().max_block; let mut weight = 0; + let next = Self::lowest_unbaked(); + let last = Self::referendum_count(); + let r = last.saturating_sub(next); + // pick out another public referendum if it's time. if (now % T::LaunchPeriod::get()).is_zero() { - // Errors come from the queue being empty. we don't really care about that, and even if - // we did, there is nothing we can do here. 
- let _ = Self::launch_next(now); - weight = max_block_weight; + // Errors come from the queue being empty. If the queue is not empty, it will take + // full block weight. + if Self::launch_next(now).is_ok() { + weight = max_block_weight; + } else { + weight = + weight.saturating_add(T::WeightInfo::on_initialize_base_with_launch_period(r)); + } + } else { + weight = weight.saturating_add(T::WeightInfo::on_initialize_base(r)); } - let next = Self::lowest_unbaked(); - let last = Self::referendum_count(); - let r = last.saturating_sub(next); - weight = weight.saturating_add(T::WeightInfo::on_initialize_base(r)); // tally up votes for any expiring referenda. for (index, info) in Self::maturing_referenda_at_inner(now, next..last).into_iter() { - let approved = Self::bake_referendum(now, index, info)?; + let approved = Self::bake_referendum(now, index, info); ReferendumInfoOf::::insert(index, ReferendumInfo::Finished { end: now, approved }); weight = max_block_weight; } - Ok(weight) + // Notes: + // * We don't consider the lowest unbaked to be the last maturing in case some referenda have + // longer voting periods than others. + // * The iteration here shouldn't trigger any storage reads that are not in cache, due to + // `maturing_referenda_at_inner` having already read them. + // * We shouldn't iterate more than `LaunchPeriod/VotingPeriod + 1` times because the number + // of unbaked referenda is bounded by this number. In case those numbers have changed in a + // runtime upgrade the formula should be adjusted but the bound should still be sensible. + >::mutate(|ref_index| { + while *ref_index < last && + Self::referendum_info(*ref_index) + .map_or(true, |info| matches!(info, ReferendumInfo::Finished { .. })) + { + *ref_index += 1 + } + }); + + weight } /// Reads the length of account in DepositOf without getting the complete value in the runtime. 
diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 75104db51b971..f56667e9094b3 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -264,7 +264,7 @@ fn propose_set_balance_and_note(who: u64, value: u64, delay: u64) -> DispatchRes fn next_block() { System::set_block_number(System::block_number() + 1); Scheduler::on_initialize(System::block_number()); - assert!(Democracy::begin_block(System::block_number()).is_ok()); + Democracy::begin_block(System::block_number()); } fn fast_forward_to(n: u64) { diff --git a/frame/democracy/src/tests/cancellation.rs b/frame/democracy/src/tests/cancellation.rs index c2bd725ce934a..83822bf51829f 100644 --- a/frame/democracy/src/tests/cancellation.rs +++ b/frame/democracy/src/tests/cancellation.rs @@ -30,10 +30,14 @@ fn cancel_referendum_should_work() { ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_ok!(Democracy::cancel_referendum(Origin::root(), r.into())); + assert_eq!(Democracy::lowest_unbaked(), 0); next_block(); + next_block(); + assert_eq!(Democracy::lowest_unbaked(), 1); + assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count()); assert_eq!(Balances::free_balance(42), 0); }); } diff --git a/frame/democracy/src/tests/scheduling.rs b/frame/democracy/src/tests/scheduling.rs index 06b492bc6093c..5c857a632b97b 100644 --- a/frame/democracy/src/tests/scheduling.rs +++ b/frame/democracy/src/tests/scheduling.rs @@ -30,8 +30,10 @@ fn simple_passing_should_work() { ); assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); + assert_eq!(Democracy::lowest_unbaked(), 0); next_block(); next_block(); + assert_eq!(Democracy::lowest_unbaked(), 1); assert_eq!(Balances::free_balance(42), 2); }); } @@ -110,3 +112,45 @@ fn delayed_enactment_should_work() { assert_eq!(Balances::free_balance(42), 2); }); } + +#[test] +fn lowest_unbaked_should_be_sensible() { + new_test_ext().execute_with(|| { + 
let r1 = Democracy::inject_referendum( + 3, + set_balance_proposal_hash_and_note(1), + VoteThreshold::SuperMajorityApprove, + 0, + ); + let r2 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + let r3 = Democracy::inject_referendum( + 10, + set_balance_proposal_hash_and_note(3), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r1, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r2, aye(1))); + // r3 is canceled + assert_ok!(Democracy::cancel_referendum(Origin::root(), r3.into())); + assert_eq!(Democracy::lowest_unbaked(), 0); + + next_block(); + + // r2 is approved + assert_eq!(Balances::free_balance(42), 2); + assert_eq!(Democracy::lowest_unbaked(), 0); + + next_block(); + + // r1 is approved + assert_eq!(Balances::free_balance(42), 1); + assert_eq!(Democracy::lowest_unbaked(), 3); + assert_eq!(Democracy::lowest_unbaked(), Democracy::referendum_count()); + }); +} diff --git a/frame/democracy/src/weights.rs b/frame/democracy/src/weights.rs index e3f22f4fc0ab3..638852d3c7e19 100644 --- a/frame/democracy/src/weights.rs +++ b/frame/democracy/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_democracy //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-08-07, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-09-30, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -60,6 +60,7 @@ pub trait WeightInfo { fn cancel_referendum() -> Weight; fn cancel_queued(r: u32, ) -> Weight; fn on_initialize_base(r: u32, ) -> Weight; + fn on_initialize_base_with_launch_period(r: u32, ) -> Weight; fn delegate(r: u32, ) -> Weight; fn undelegate(r: u32, ) -> Weight; fn clear_public_proposals() -> Weight; @@ -80,15 +81,15 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - (65_665_000 as Weight) + (67_388_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy DepositOf (r:1 w:1) fn second(s: u32, ) -> Weight { - (40_003_000 as Weight) - // Standard Error: 1_000 - .saturating_add((180_000 as Weight).saturating_mul(s as Weight)) + (41_157_000 as Weight) + // Standard Error: 0 + .saturating_add((157_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -96,9 +97,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_new(r: u32, ) -> Weight { - (45_465_000 as Weight) + (46_406_000 as Weight) // Standard Error: 1_000 - .saturating_add((220_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((170_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -106,16 +107,16 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_existing(r: u32, ) -> Weight { - (45_112_000 as Weight) + (46_071_000 as Weight) // Standard Error: 1_000 - .saturating_add((222_000 as Weight).saturating_mul(r as Weight)) + 
.saturating_add((166_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - (26_651_000 as Weight) + (27_699_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -126,45 +127,45 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn blacklist(p: u32, ) -> Weight { - (77_737_000 as Weight) + (82_703_000 as Weight) // Standard Error: 4_000 - .saturating_add((512_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((500_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) fn external_propose(v: u32, ) -> Weight { - (13_126_000 as Weight) + (13_747_000 as Weight) // Standard Error: 0 - .saturating_add((89_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((76_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - (2_923_000 as Weight) + (3_070_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - (2_889_000 as Weight) + (3_080_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - (27_598_000 as Weight) + 
(29_129_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) fn veto_external(v: u32, ) -> Weight { - (28_416_000 as Weight) + (30_105_000 as Weight) // Standard Error: 0 - .saturating_add((132_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((104_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -172,36 +173,46 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn cancel_proposal(p: u32, ) -> Weight { - (52_836_000 as Weight) - // Standard Error: 2_000 - .saturating_add((478_000 as Weight).saturating_mul(p as Weight)) + (55_228_000 as Weight) + // Standard Error: 1_000 + .saturating_add((457_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - (16_891_000 as Weight) + (17_319_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_queued(r: u32, ) -> Weight { - (30_504_000 as Weight) - // Standard Error: 2_000 - .saturating_add((1_480_000 as Weight).saturating_mul(r as Weight)) + (29_738_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_153_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } - // Storage: Democracy LastTabledWasExternal (r:1 w:0) - // Storage: Democracy NextExternal (r:1 w:0) - // Storage: Democracy PublicProps (r:1 w:0) // Storage: Democracy LowestUnbaked (r:1 w:0) // Storage: Democracy 
ReferendumCount (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base(r: u32, ) -> Weight { - (6_259_000 as Weight) + (2_165_000 as Weight) + // Standard Error: 3_000 + .saturating_add((5_577_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + } + // Storage: Democracy LowestUnbaked (r:1 w:0) + // Storage: Democracy ReferendumCount (r:1 w:0) + // Storage: Democracy LastTabledWasExternal (r:1 w:0) + // Storage: Democracy NextExternal (r:1 w:0) + // Storage: Democracy PublicProps (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:1 w:0) + fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { + (9_396_000 as Weight) // Standard Error: 4_000 - .saturating_add((5_032_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((5_604_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) } @@ -209,9 +220,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn delegate(r: u32, ) -> Weight { - (51_719_000 as Weight) - // Standard Error: 5_000 - .saturating_add((7_210_000 as Weight).saturating_mul(r as Weight)) + (57_783_000 as Weight) + // Standard Error: 4_000 + .saturating_add((7_623_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(4 as Weight)) @@ -220,9 +231,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Democracy VotingOf (r:2 w:2) // Storage: Democracy ReferendumInfoOf (r:1 w:1) fn undelegate(r: u32, ) -> Weight { - (23_203_000 as Weight) - // Standard Error: 5_000 - .saturating_add((7_206_000 as 
Weight).saturating_mul(r as Weight)) + (26_027_000 as Weight) + // Standard Error: 4_000 + .saturating_add((7_593_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) @@ -230,31 +241,31 @@ impl WeightInfo for SubstrateWeight { } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - (3_127_000 as Weight) + (2_780_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_preimage(b: u32, ) -> Weight { - (44_130_000 as Weight) + (46_416_000 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_imminent_preimage(b: u32, ) -> Weight { - (28_756_000 as Weight) + (29_735_000 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) // Storage: System Account (r:1 w:0) fn reap_preimage(b: u32, ) -> Weight { - (39_922_000 as Weight) + (41_276_000 as Weight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -262,9 +273,9 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_remove(r: u32, ) -> 
Weight { - (38_621_000 as Weight) + (40_348_000 as Weight) // Standard Error: 1_000 - .saturating_add((110_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((60_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -272,27 +283,27 @@ impl WeightInfo for SubstrateWeight { // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_set(r: u32, ) -> Weight { - (36_631_000 as Weight) + (37_475_000 as Weight) // Standard Error: 1_000 - .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((151_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_vote(r: u32, ) -> Weight { - (21_025_000 as Weight) + (19_970_000 as Weight) // Standard Error: 1_000 - .saturating_add((195_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((153_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_other_vote(r: u32, ) -> Weight { - (20_628_000 as Weight) + (20_094_000 as Weight) // Standard Error: 1_000 - .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((157_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } @@ -305,15 +316,15 @@ impl WeightInfo for () { // Storage: Democracy Blacklist (r:1 w:0) // Storage: Democracy DepositOf (r:0 w:1) fn propose() -> Weight { - (65_665_000 as Weight) + (67_388_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) 
.saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy DepositOf (r:1 w:1) fn second(s: u32, ) -> Weight { - (40_003_000 as Weight) - // Standard Error: 1_000 - .saturating_add((180_000 as Weight).saturating_mul(s as Weight)) + (41_157_000 as Weight) + // Standard Error: 0 + .saturating_add((157_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -321,9 +332,9 @@ impl WeightInfo for () { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_new(r: u32, ) -> Weight { - (45_465_000 as Weight) + (46_406_000 as Weight) // Standard Error: 1_000 - .saturating_add((220_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((170_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -331,16 +342,16 @@ impl WeightInfo for () { // Storage: Democracy VotingOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn vote_existing(r: u32, ) -> Weight { - (45_112_000 as Weight) + (46_071_000 as Weight) // Standard Error: 1_000 - .saturating_add((222_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((166_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy Cancellations (r:1 w:1) fn emergency_cancel() -> Weight { - (26_651_000 as Weight) + (27_699_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -351,45 +362,45 @@ impl WeightInfo for () { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) fn blacklist(p: u32, ) -> Weight { - (77_737_000 as Weight) + (82_703_000 as Weight) // Standard Error: 4_000 - 
.saturating_add((512_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((500_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(6 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:0) fn external_propose(v: u32, ) -> Weight { - (13_126_000 as Weight) + (13_747_000 as Weight) // Standard Error: 0 - .saturating_add((89_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((76_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_majority() -> Weight { - (2_923_000 as Weight) + (3_070_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:0 w:1) fn external_propose_default() -> Weight { - (2_889_000 as Weight) + (3_080_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy ReferendumCount (r:1 w:1) // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn fast_track() -> Weight { - (27_598_000 as Weight) + (29_129_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy NextExternal (r:1 w:1) // Storage: Democracy Blacklist (r:1 w:1) fn veto_external(v: u32, ) -> Weight { - (28_416_000 as Weight) + (30_105_000 as Weight) // Standard Error: 0 - .saturating_add((132_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((104_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } @@ -397,36 +408,46 @@ impl WeightInfo for () { // Storage: Democracy DepositOf (r:1 w:1) // Storage: System Account (r:1 w:1) 
fn cancel_proposal(p: u32, ) -> Weight { - (52_836_000 as Weight) - // Standard Error: 2_000 - .saturating_add((478_000 as Weight).saturating_mul(p as Weight)) + (55_228_000 as Weight) + // Standard Error: 1_000 + .saturating_add((457_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:0 w:1) fn cancel_referendum() -> Weight { - (16_891_000 as Weight) + (17_319_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Scheduler Lookup (r:1 w:1) // Storage: Scheduler Agenda (r:1 w:1) fn cancel_queued(r: u32, ) -> Weight { - (30_504_000 as Weight) - // Standard Error: 2_000 - .saturating_add((1_480_000 as Weight).saturating_mul(r as Weight)) + (29_738_000 as Weight) + // Standard Error: 1_000 + .saturating_add((1_153_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } - // Storage: Democracy LastTabledWasExternal (r:1 w:0) - // Storage: Democracy NextExternal (r:1 w:0) - // Storage: Democracy PublicProps (r:1 w:0) // Storage: Democracy LowestUnbaked (r:1 w:0) // Storage: Democracy ReferendumCount (r:1 w:0) // Storage: Democracy ReferendumInfoOf (r:1 w:0) fn on_initialize_base(r: u32, ) -> Weight { - (6_259_000 as Weight) + (2_165_000 as Weight) + // Standard Error: 3_000 + .saturating_add((5_577_000 as Weight).saturating_mul(r as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) + } + // Storage: Democracy LowestUnbaked (r:1 w:0) + // Storage: Democracy ReferendumCount (r:1 w:0) + // Storage: Democracy LastTabledWasExternal (r:1 w:0) + // Storage: Democracy NextExternal (r:1 w:0) + // Storage: Democracy PublicProps (r:1 w:0) + // Storage: Democracy ReferendumInfoOf (r:1 w:0) + 
fn on_initialize_base_with_launch_period(r: u32, ) -> Weight { + (9_396_000 as Weight) // Standard Error: 4_000 - .saturating_add((5_032_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((5_604_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) } @@ -434,9 +455,9 @@ impl WeightInfo for () { // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Balances Locks (r:1 w:1) fn delegate(r: u32, ) -> Weight { - (51_719_000 as Weight) - // Standard Error: 5_000 - .saturating_add((7_210_000 as Weight).saturating_mul(r as Weight)) + (57_783_000 as Weight) + // Standard Error: 4_000 + .saturating_add((7_623_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) @@ -445,9 +466,9 @@ impl WeightInfo for () { // Storage: Democracy VotingOf (r:2 w:2) // Storage: Democracy ReferendumInfoOf (r:1 w:1) fn undelegate(r: u32, ) -> Weight { - (23_203_000 as Weight) - // Standard Error: 5_000 - .saturating_add((7_206_000 as Weight).saturating_mul(r as Weight)) + (26_027_000 as Weight) + // Standard Error: 4_000 + .saturating_add((7_593_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) @@ -455,31 +476,31 @@ impl WeightInfo for () { } // Storage: Democracy PublicProps (r:0 w:1) fn clear_public_proposals() -> Weight { - (3_127_000 as Weight) + (2_780_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_preimage(b: u32, ) -> Weight { - (44_130_000 as Weight) + (46_416_000 as Weight) // 
Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) fn note_imminent_preimage(b: u32, ) -> Weight { - (28_756_000 as Weight) + (29_735_000 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: Democracy Preimages (r:1 w:1) // Storage: System Account (r:1 w:0) fn reap_preimage(b: u32, ) -> Weight { - (39_922_000 as Weight) + (41_276_000 as Weight) // Standard Error: 0 - .saturating_add((2_000 as Weight).saturating_mul(b as Weight)) + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -487,9 +508,9 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_remove(r: u32, ) -> Weight { - (38_621_000 as Weight) + (40_348_000 as Weight) // Standard Error: 1_000 - .saturating_add((110_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((60_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -497,27 +518,27 @@ impl WeightInfo for () { // Storage: Balances Locks (r:1 w:1) // Storage: System Account (r:1 w:1) fn unlock_set(r: u32, ) -> Weight { - (36_631_000 as Weight) + (37_475_000 as Weight) // Standard Error: 1_000 - .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((151_000 as Weight).saturating_mul(r as Weight)) 
.saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_vote(r: u32, ) -> Weight { - (21_025_000 as Weight) + (19_970_000 as Weight) // Standard Error: 1_000 - .saturating_add((195_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((153_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: Democracy ReferendumInfoOf (r:1 w:1) // Storage: Democracy VotingOf (r:1 w:1) fn remove_other_vote(r: u32, ) -> Weight { - (20_628_000 as Weight) + (20_094_000 as Weight) // Standard Error: 1_000 - .saturating_add((214_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((157_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml index b2d50321e8cd3..3c6b405c331f0 100644 --- a/frame/election-provider-multi-phase/Cargo.toml +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "PALLET two phase election providers" readme = "README.md" diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs index fb5adda52e166..9648b8e0f2465 100644 --- a/frame/election-provider-multi-phase/src/benchmarking.rs +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -19,7 +19,7 @@ use super::*; use crate::{unsigned::IndexAssignmentOf, Pallet as MultiPhase}; -use 
frame_benchmarking::{account, impl_benchmark_test_suite}; +use frame_benchmarking::account; use frame_support::{assert_ok, traits::Hooks}; use frame_system::RawOrigin; use rand::{prelude::SliceRandom, rngs::SmallRng, SeedableRng}; @@ -243,10 +243,10 @@ frame_benchmarking::benchmarks! { } create_snapshot_internal { - // number of votes in snapshot. Fixed to maximum. - let v = T::BenchmarkingConfig::SNAPSHOT_MAXIMUM_VOTERS; - // number of targets in snapshot. Fixed to maximum. - let t = T::BenchmarkingConfig::MAXIMUM_TARGETS; + // number of votes in snapshot. + let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; + // number of targets in snapshot. + let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; // we don't directly need the data-provider to be populated, but it is just easy to use it. set_up_data_provider::(v, t); @@ -350,25 +350,8 @@ frame_benchmarking::benchmarks! { assert!(>::queued_solution().is_none()); >::put(Phase::Unsigned((true, 1u32.into()))); - - // encode the most significant storage item that needs to be decoded in the dispatch. - let encoded_snapshot = >::snapshot().ok_or("missing snapshot")?.encode(); - let encoded_call = Call::::submit_unsigned { - raw_solution: Box::new(raw_solution.clone()), - witness - }.encode(); - }: { - assert_ok!( - >::submit_unsigned( - RawOrigin::None.into(), - Box::new(raw_solution), - witness, - ) - ); - let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot) - .expect("decoding should not fail; qed."); - let _decoded_call = as Decode>::decode(&mut &*encoded_call).expect("decoding should not fail; qed."); - } verify { + }: _(RawOrigin::None, Box::new(raw_solution), witness) + verify { assert!(>::queued_solution().is_some()); } @@ -389,13 +372,8 @@ frame_benchmarking::benchmarks! 
{ assert_eq!(raw_solution.solution.voter_count() as u32, a); assert_eq!(raw_solution.solution.unique_targets().len() as u32, d); - - // encode the most significant storage item that needs to be decoded in the dispatch. - let encoded_snapshot = >::snapshot().ok_or("snapshot missing")?.encode(); }: { assert_ok!(>::feasibility_check(raw_solution, ElectionCompute::Unsigned)); - let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot) - .expect("decoding should not fail; qed."); } // NOTE: this weight is not used anywhere, but the fact that it should succeed when execution in @@ -519,10 +497,10 @@ frame_benchmarking::benchmarks! { log!(trace, "actual encoded size = {}", encoding.len()); assert!(encoding.len() <= desired_size); } -} -impl_benchmark_test_suite!( - MultiPhase, - crate::mock::ExtBuilder::default().build_offchainify(10).0, - crate::mock::Runtime, -); + impl_benchmark_test_suite!( + MultiPhase, + crate::mock::ExtBuilder::default().build_offchainify(10).0, + crate::mock::Runtime, + ); +} diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs index e83c49433e2bb..a7863fafa7747 100644 --- a/frame/election-provider-multi-phase/src/lib.rs +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -67,11 +67,11 @@ //! origin can not bail out in any way, if their solution is queued. //! //! Upon the end of the signed phase, the solutions are examined from best to worse (i.e. `pop()`ed -//! until drained). Each solution undergoes an expensive `Pallet::feasibility_check`, which -//! ensures the score claimed by this score was correct, and it is valid based on the election data -//! (i.e. votes and candidates). At each step, if the current best solution passes the feasibility -//! check, it is considered to be the best one. The sender of the origin is rewarded, and the rest -//! of the queued solutions get their deposit back and are discarded, without being checked. +//! until drained). 
Each solution undergoes an expensive `Pallet::feasibility_check`, which ensures +//! the score claimed by this score was correct, and it is valid based on the election data (i.e. +//! votes and candidates). At each step, if the current best solution passes the feasibility check, +//! it is considered to be the best one. The sender of the origin is rewarded, and the rest of the +//! queued solutions get their deposit back and are discarded, without being checked. //! //! The following example covers all of the cases at the end of the signed phase: //! @@ -121,17 +121,35 @@ //! //! If, for any of the below reasons: //! -//! 1. No signed or unsigned solution submitted & Fallback is `None` or failed -//! 2. Internal error +//! 1. No **signed** or **unsigned** solution submitted, and no successful [`Config::Fallback`] is +//! provided +//! 2. Any other unforeseen internal error //! //! A call to `T::ElectionProvider::elect` is made, and `Ok(_)` cannot be returned, then the pallet //! proceeds to the [`Phase::Emergency`]. During this phase, any solution can be submitted from -//! [`Config::ForceOrigin`], without any checking. Once submitted, the forced solution is kept in -//! [`QueuedSolution`] until the next call to `T::ElectionProvider::elect`, where it is returned and -//! [`Phase`] goes back to `Off`. +//! [`Config::ForceOrigin`], without any checking, via [`Pallet::set_emergency_election_result`] +//! transaction. Hence, `[`Config::ForceOrigin`]` should only be set to a trusted origin, such as +//! the council or root. Once submitted, the forced solution is kept in [`QueuedSolution`] until the +//! next call to `T::ElectionProvider::elect`, where it is returned and [`Phase`] goes back to +//! `Off`. //! //! This implies that the user of this pallet (i.e. a staking pallet) should re-try calling -//! `T::ElectionProvider::elect` in case of error until `OK(_)` is returned. +//! `T::ElectionProvider::elect` in case of error, until `OK(_)` is returned. +//! +//! 
To generate an emergency solution, one must only provide one argument: [`Supports`]. This is +//! essentially a collection of elected winners for the election, and voters who support them. The +//! supports can be generated by any means. In the simplest case, it could be manual. For example, +//! in the case of massive network failure or misbehaviour, [`Config::ForceOrigin`] might decide to +//! select only a small number of emergency winners (which would greatly restrict the next validator +//! set, if this pallet is used with `pallet-staking`). If the failure is for other technical +//! reasons, then a simple and safe way to generate supports is using the staking-miner binary +//! provided in the Polkadot repository. This binary has a subcommand named `emergency-solution` +//! which is capable of connecting to a live network, and generating appropriate `supports` using a +//! standard algorithm, and outputting the `supports` in hex format, ready for submission. Note that +//! while this binary lives in the Polkadot repository, this particular subcommand of it can work +//! against any substrate based-chain. +//! +//! See the `staking-miner` documentation in the Polkadot repository for more information. //! //! ## Feasible Solution (correct solution) //! @@ -146,16 +164,16 @@ //! //! ## Accuracy //! -//! The accuracy of the election is configured via -//! [`SolutionAccuracyOf`] which is the accuracy that the submitted solutions must adhere to. +//! The accuracy of the election is configured via [`SolutionAccuracyOf`] which is the accuracy that +//! the submitted solutions must adhere to. //! //! Note that the accuracy is of great importance. The offchain solution should be as small as //! possible, reducing solutions size/weight. //! //! ## Error types //! -//! This pallet provides a verbose error system to ease future debugging and debugging. The -//! overall hierarchy of errors is as follows: +//! 
This pallet provides a verbose error system to ease future debugging and debugging. The overall +//! hierarchy of errors is as follows: //! //! 1. [`pallet::Error`]: These are the errors that can be returned in the dispatchables of the //! pallet, either signed or unsigned. Since decomposition with nested enums is not possible @@ -173,6 +191,9 @@ //! //! ## Future Plans //! +//! **Emergency-phase recovery script**: This script should be taken out of staking-miner in +//! polkadot and ideally live in `substrate/utils/frame/elections`. +//! //! **Challenge Phase**. We plan on adding a third phase to the pallet, called the challenge phase. //! This is a phase in which no further solutions are processed, and the current best solution might //! be challenged by anyone (signed or unsigned). The main plan here is to enforce the solution to @@ -1201,8 +1222,12 @@ impl Pallet { match current_phase { Phase::Unsigned((true, opened)) if opened == now => { // Mine a new solution, cache it, and attempt to submit it - let initial_output = Self::ensure_offchain_repeat_frequency(now) - .and_then(|_| Self::mine_check_save_submit()); + let initial_output = Self::ensure_offchain_repeat_frequency(now).and_then(|_| { + // This is executed at the beginning of each round. Any cache is now invalid. + // Clear it. + unsigned::kill_ocw_solution::(); + Self::mine_check_save_submit() + }); log!(debug, "initial offchain thread output: {:?}", initial_output); }, Phase::Unsigned((true, opened)) if opened < now => { @@ -1214,20 +1239,6 @@ impl Pallet { }, _ => {}, } - - // After election finalization, clear OCW solution storage. - // - // We can read the events here because offchain worker doesn't affect PoV. 
- if >::read_events_no_consensus() - .into_iter() - .filter_map(|event_record| { - let local_event = ::Event::from(event_record.event); - local_event.try_into().ok() - }) - .any(|event| matches!(event, Event::ElectionFinalized(_))) - { - unsigned::kill_ocw_solution::(); - } } /// Logic for [`::on_initialize`] when signed phase is being opened. @@ -1317,8 +1328,10 @@ impl Pallet { let (targets, voters, desired_targets) = Self::create_snapshot_external()?; // ..therefore we only measure the weight of this and add it. + let internal_weight = + T::WeightInfo::create_snapshot_internal(voters.len() as u32, targets.len() as u32); Self::create_snapshot_internal(targets, voters, desired_targets); - Self::register_weight(T::WeightInfo::create_snapshot_internal()); + Self::register_weight(internal_weight); Ok(()) } diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs index 0d563955595a8..1a65316be1f10 100644 --- a/frame/election-provider-multi-phase/src/mock.rs +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -304,11 +304,11 @@ impl multi_phase::weights::WeightInfo for DualMockWeightInfo { <() as multi_phase::weights::WeightInfo>::on_initialize_nothing() } } - fn create_snapshot_internal() -> Weight { + fn create_snapshot_internal(v: u32, t: u32) -> Weight { if MockWeightInfo::get() { Zero::zero() } else { - <() as multi_phase::weights::WeightInfo>::create_snapshot_internal() + <() as multi_phase::weights::WeightInfo>::create_snapshot_internal(v, t) } } fn on_initialize_open_signed() -> Weight { diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs index af0b79177d86c..0ed9b5427b1ec 100644 --- a/frame/election-provider-multi-phase/src/unsigned.rs +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -651,7 +651,7 @@ mod max_weight { fn elect_queued(a: u32, d: u32) -> Weight { unreachable!() } - fn create_snapshot_internal() -> Weight { + fn 
create_snapshot_internal(v: u32, t: u32) -> Weight { unreachable!() } fn on_initialize_nothing() -> Weight { @@ -1241,35 +1241,62 @@ mod tests { } #[test] - fn ocw_clears_cache_after_election() { - let (mut ext, _pool) = ExtBuilder::default().build_offchainify(0); + fn ocw_clears_cache_on_unsigned_phase_open() { + let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); ext.execute_with(|| { - roll_to(25); - assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + const BLOCK: u64 = 25; + let block_plus = |delta: u64| BLOCK + delta; + let offchain_repeat = ::OffchainRepeat::get(); - // we must clear the offchain storage to ensure the offchain execution check doesn't get - // in the way. - let mut storage = StorageValueRef::persistent(&OFFCHAIN_LAST_BLOCK); - storage.clear(); + roll_to(BLOCK); + // we are on the first block of the unsigned phase + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, BLOCK))); assert!( !ocw_solution_exists::(), "no solution should be present before we mine one", ); - // creates and cache a solution - MultiPhase::offchain_worker(25); + // create and cache a solution on the first block of the unsigned phase + MultiPhase::offchain_worker(BLOCK); assert!( ocw_solution_exists::(), "a solution must be cached after running the worker", ); - // after an election, the solution must be cleared + // record the submitted tx, + let tx_cache_1 = pool.read().transactions[0].clone(); + // and assume it has been processed. 
+ pool.try_write().unwrap().transactions.clear(); + + // after an election, the solution is not cleared // we don't actually care about the result of the election - roll_to(26); let _ = MultiPhase::do_elect(); - MultiPhase::offchain_worker(26); - assert!(!ocw_solution_exists::(), "elections must clear the ocw cache"); + MultiPhase::offchain_worker(block_plus(1)); + assert!(ocw_solution_exists::(), "elections does not clear the ocw cache"); + + // submit a solution with the offchain worker after the repeat interval + MultiPhase::offchain_worker(block_plus(offchain_repeat + 1)); + + // record the submitted tx, + let tx_cache_2 = pool.read().transactions[0].clone(); + // and assume it has been processed. + pool.try_write().unwrap().transactions.clear(); + + // the OCW submitted the same solution twice since the cache was not cleared. + assert_eq!(tx_cache_1, tx_cache_2); + + let current_block = block_plus(offchain_repeat * 2 + 2); + // force the unsigned phase to start on the current block. + CurrentPhase::::set(Phase::Unsigned((true, current_block))); + + // clear the cache and create a solution since we are on the first block of the unsigned + // phase. + MultiPhase::offchain_worker(current_block); + let tx_cache_3 = pool.read().transactions[0].clone(); + + // the submitted solution changes because the cache was cleared. + assert_eq!(tx_cache_1, tx_cache_3); }) } diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs index 262838bcb9e70..4d49f60fabfc3 100644 --- a/frame/election-provider-multi-phase/src/weights.rs +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_election_provider_multi_phase //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-08-18, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! DATE: 2021-09-22, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -50,7 +50,7 @@ pub trait WeightInfo { fn on_initialize_open_unsigned() -> Weight; fn finalize_signed_phase_accept_solution() -> Weight; fn finalize_signed_phase_reject_solution() -> Weight; - fn create_snapshot_internal() -> Weight; + fn create_snapshot_internal(v: u32, t: u32, ) -> Weight; fn elect_queued(a: u32, d: u32, ) -> Weight; fn submit(c: u32, ) -> Weight; fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight; @@ -69,41 +69,45 @@ impl WeightInfo for SubstrateWeight { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - (23_878_000 as Weight) + (22_784_000 as Weight) .saturating_add(T::DbWeight::get().reads(8 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - (34_547_000 as Weight) + (32_763_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - (33_568_000 as Weight) + (29_117_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - (50_596_000 as Weight) + (48_996_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - (33_389_000 as Weight) + (32_508_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as 
Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) - fn create_snapshot_internal() -> Weight { - (8_835_233_000 as Weight) + fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { + (96_001_000 as Weight) + // Standard Error: 1_000 + .saturating_add((307_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 2_000 + .saturating_add((133_000 as Weight).saturating_mul(t as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -116,11 +120,11 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn elect_queued(a: u32, d: u32, ) -> Weight { - (82_395_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_769_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 13_000 - .saturating_add((320_000 as Weight).saturating_mul(d as Weight)) + (100_505_000 as Weight) + // Standard Error: 6_000 + .saturating_add((1_665_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 10_000 + .saturating_add((443_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) } @@ -131,9 +135,9 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit(c: u32, ) -> Weight { - (77_368_000 as Weight) - // Standard Error: 9_000 - .saturating_add((369_000 as Weight).saturating_mul(c as Weight)) + (74_088_000 as Weight) + // Standard Error: 59_000 + .saturating_add((187_000 as Weight).saturating_mul(c as Weight)) 
.saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } @@ -146,14 +150,14 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 4_000 - .saturating_add((3_553_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 23_000 - .saturating_add((35_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 7_000 - .saturating_add((10_600_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 59_000 - .saturating_add((6_128_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 5_000 + .saturating_add((1_970_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 10_000 + .saturating_add((173_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 18_000 + .saturating_add((9_783_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 27_000 + .saturating_add((2_224_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -161,14 +165,16 @@ impl WeightInfo for SubstrateWeight { // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - fn feasibility_check(v: u32, _t: u32, a: u32, d: u32, ) -> Weight { + fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) // Standard Error: 3_000 - .saturating_add((3_478_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 6_000 - .saturating_add((8_930_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 47_000 - .saturating_add((5_199_000 as Weight).saturating_mul(d as Weight)) + .saturating_add((1_910_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 7_000 + 
.saturating_add((111_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 13_000 + .saturating_add((7_741_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 19_000 + .saturating_add((1_844_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) } } @@ -184,41 +190,45 @@ impl WeightInfo for () { // Storage: Staking ForceEra (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:1 w:0) fn on_initialize_nothing() -> Weight { - (23_878_000 as Weight) + (22_784_000 as Weight) .saturating_add(RocksDbWeight::get().reads(8 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_signed() -> Weight { - (34_547_000 as Weight) + (32_763_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase Round (r:1 w:0) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn on_initialize_open_unsigned() -> Weight { - (33_568_000 as Weight) + (29_117_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: System Account (r:1 w:1) // Storage: ElectionProviderMultiPhase QueuedSolution (r:0 w:1) fn finalize_signed_phase_accept_solution() -> Weight { - (50_596_000 as Weight) + (48_996_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } // Storage: System Account (r:1 w:1) fn finalize_signed_phase_reject_solution() -> Weight { - (33_389_000 as Weight) + (32_508_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } // Storage: ElectionProviderMultiPhase SnapshotMetadata (r:0 w:1) // Storage: ElectionProviderMultiPhase DesiredTargets (r:0 w:1) // Storage: 
ElectionProviderMultiPhase Snapshot (r:0 w:1) - fn create_snapshot_internal() -> Weight { - (8_835_233_000 as Weight) + fn create_snapshot_internal(v: u32, t: u32, ) -> Weight { + (96_001_000 as Weight) + // Standard Error: 1_000 + .saturating_add((307_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 2_000 + .saturating_add((133_000 as Weight).saturating_mul(t as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } // Storage: ElectionProviderMultiPhase SignedSubmissionIndices (r:1 w:1) @@ -231,11 +241,11 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase Snapshot (r:0 w:1) // Storage: ElectionProviderMultiPhase CurrentPhase (r:0 w:1) fn elect_queued(a: u32, d: u32, ) -> Weight { - (82_395_000 as Weight) - // Standard Error: 1_000 - .saturating_add((1_769_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 13_000 - .saturating_add((320_000 as Weight).saturating_mul(d as Weight)) + (100_505_000 as Weight) + // Standard Error: 6_000 + .saturating_add((1_665_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 10_000 + .saturating_add((443_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) } @@ -246,9 +256,9 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase SignedSubmissionNextIndex (r:1 w:1) // Storage: ElectionProviderMultiPhase SignedSubmissionsMap (r:0 w:1) fn submit(c: u32, ) -> Weight { - (77_368_000 as Weight) - // Standard Error: 9_000 - .saturating_add((369_000 as Weight).saturating_mul(c as Weight)) + (74_088_000 as Weight) + // Standard Error: 59_000 + .saturating_add((187_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } @@ -261,14 +271,14 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) fn submit_unsigned(v: u32, t: 
u32, a: u32, d: u32, ) -> Weight { (0 as Weight) - // Standard Error: 4_000 - .saturating_add((3_553_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 23_000 - .saturating_add((35_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 7_000 - .saturating_add((10_600_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 59_000 - .saturating_add((6_128_000 as Weight).saturating_mul(d as Weight)) + // Standard Error: 5_000 + .saturating_add((1_970_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 10_000 + .saturating_add((173_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 18_000 + .saturating_add((9_783_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 27_000 + .saturating_add((2_224_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } @@ -276,14 +286,16 @@ impl WeightInfo for () { // Storage: ElectionProviderMultiPhase DesiredTargets (r:1 w:0) // Storage: ElectionProviderMultiPhase MinimumUntrustedScore (r:1 w:0) // Storage: ElectionProviderMultiPhase Snapshot (r:1 w:0) - fn feasibility_check(v: u32, _t: u32, a: u32, d: u32, ) -> Weight { + fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { (0 as Weight) // Standard Error: 3_000 - .saturating_add((3_478_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 6_000 - .saturating_add((8_930_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 47_000 - .saturating_add((5_199_000 as Weight).saturating_mul(d as Weight)) + .saturating_add((1_910_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 7_000 + .saturating_add((111_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 13_000 + .saturating_add((7_741_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 19_000 + .saturating_add((1_844_000 as Weight).saturating_mul(d as Weight)) 
.saturating_add(RocksDbWeight::get().reads(4 as Weight)) } } diff --git a/frame/election-provider-support/Cargo.toml b/frame/election-provider-support/Cargo.toml index dfe2b11024334..46e6500cac33a 100644 --- a/frame/election-provider-support/Cargo.toml +++ b/frame/election-provider-support/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "election provider supporting traits" readme = "README.md" diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index f2771a9f72783..80afc3f5d8b00 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -4,7 +4,7 @@ version = "5.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet based on seq-Phragmén election method." readme = "README.md" diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 7cb83b3dd7799..9bc63848607ab 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -21,10 +21,11 @@ use super::*; -use frame_benchmarking::{ - account, benchmarks, impl_benchmark_test_suite, whitelist, BenchmarkError, BenchmarkResult, +use frame_benchmarking::{account, benchmarks, whitelist, BenchmarkError, BenchmarkResult}; +use frame_support::{ + dispatch::{DispatchResultWithPostInfo, UnfilteredDispatchable}, + traits::OnInitialize, }; -use frame_support::{dispatch::DispatchResultWithPostInfo, traits::OnInitialize}; use frame_system::RawOrigin; use crate::Pallet as Elections; @@ -401,15 +402,23 @@ benchmarks! 
{ let _ = fill_seats_up_to::(m)?; let removing = as_lookup::(>::members_ids()[0].clone()); + let who = T::Lookup::lookup(removing.clone()).expect("member was added above"); + let call = Call::::remove_member { who: removing, has_replacement: false }.encode(); }: { assert_eq!( - >::remove_member(RawOrigin::Root.into(), removing, false).unwrap_err().error, + as Decode>::decode(&mut &*call) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(RawOrigin::Root.into()) + .unwrap_err() + .error, Error::::InvalidReplacement.into(), ); } verify { // must still have enough members. assert_eq!(>::members().len() as u32, T::DesiredMembers::get()); + // on fail, `who` must still be a member + assert!(>::members_ids().contains(&who)); #[cfg(test)] { // reset members in between benchmark tests. @@ -538,11 +547,11 @@ benchmarks! { MEMBERS.with(|m| *m.borrow_mut() = vec![]); } } -} -impl_benchmark_test_suite!( - Elections, - crate::tests::ExtBuilder::default().desired_members(13).desired_runners_up(7), - crate::tests::Test, - exec_name = build_and_execute, -); + impl_benchmark_test_suite!( + Elections, + crate::tests::ExtBuilder::default().desired_members(13).desired_runners_up(7), + crate::tests::Test, + exec_name = build_and_execute, + ); +} diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index 8557cfba6b58c..f5c7acb1cd5d5 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for elections" readme = "README.md" diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/example-offchain-worker/Cargo.toml index 1ccd9f33f0318..2759664d6e653 100644 --- a/frame/example-offchain-worker/Cargo.toml +++ b/frame/example-offchain-worker/Cargo.toml @@ 
-4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME example pallet for offchain worker" readme = "README.md" diff --git a/frame/example-parallel/Cargo.toml b/frame/example-parallel/Cargo.toml index 5e0f6d4bc255a..6511a1cd369bf 100644 --- a/frame/example-parallel/Cargo.toml +++ b/frame/example-parallel/Cargo.toml @@ -4,7 +4,7 @@ version = "3.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME example pallet using runtime worker threads" diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index 58daaf1c75558..e144f1e927d36 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME example pallet" readme = "README.md" diff --git a/frame/example/src/benchmarking.rs b/frame/example/src/benchmarking.rs index cdf6c152a4880..e89c646e03f1a 100644 --- a/frame/example/src/benchmarking.rs +++ b/frame/example/src/benchmarking.rs @@ -20,19 +20,19 @@ #![cfg(feature = "runtime-benchmarks")] use crate::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_system::RawOrigin; // To actually run this benchmark on pallet-example, we need to put this pallet into the // runtime and compile it with `runtime-benchmarks` feature. 
The detail procedures are // documented at: -// https://substrate.dev/docs/en/knowledgebase/runtime/benchmarking#how-to-benchmark +// https://docs.substrate.io/v3/runtime/benchmarking#how-to-benchmark // // The auto-generated weight estimate of this pallet is copied over to the `weights.rs` file. // The exact command of how the estimate generated is printed at the top of the file. // Details on using the benchmarks macro can be seen at: -// https://substrate.dev/rustdocs/v3.0.0/frame_benchmarking/macro.benchmarks.html +// https://paritytech.github.io/substrate/master/frame_benchmarking/trait.Benchmarking.html#tymethod.benchmarks benchmarks! { // This will measure the execution time of `set_dummy` for b in [1..1000] range. set_dummy_benchmark { @@ -65,12 +65,14 @@ benchmarks! { // The benchmark execution phase could also be a closure with custom code m.sort(); } -} -// This line generates test cases for benchmarking, and could be run by: -// `cargo test -p pallet-example --all-features`, you will see an additional line of: -// `test benchmarking::benchmark_tests::test_benchmarks ... ok` in the result. -// -// The line generates three steps per benchmark, with repeat=1 and the three steps are -// [low, mid, high] of the range. -impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); + // This line generates test cases for benchmarking, and could be run by: + // `cargo test -p pallet-example --all-features`, you will see one line per case: + // `test benchmarking::bench_sort_vector ... ok` + // `test benchmarking::bench_accumulate_dummy ... ok` + // `test benchmarking::bench_set_dummy_benchmark ... ok` in the result. + // + // The line generates three steps per benchmark, with repeat=1 and the three steps are + // [low, mid, high] of the range. 
+ impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test) +} diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 23c4951c1a603..981274b1ba739 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -316,8 +316,7 @@ const MILLICENTS: u32 = 1_000_000_000; // - assigns a dispatch class `operational` if the argument of the call is more than 1000. // // More information can be read at: -// - https://substrate.dev/docs/en/knowledgebase/learn-substrate/weight -// - https://substrate.dev/docs/en/knowledgebase/runtime/fees#default-weight-annotations +// - https://docs.substrate.io/v3/runtime/weights-and-fees // // Manually configuring weight is an advanced operation and what you really need may well be // fulfilled by running the benchmarking toolchain. Refer to `benchmarking.rs` file. @@ -487,11 +486,12 @@ pub mod pallet { // the chain in a moderate rate. // // The parenthesized value of the `#[pallet::weight(..)]` attribute can be any type that - // implements a set of traits, namely [`WeighData`] and [`ClassifyDispatch`]. - // The former conveys the weight (a numeric representation of pure execution time and - // difficulty) of the transaction and the latter demonstrates the [`DispatchClass`] of the - // call. A higher weight means a larger transaction (less of which can be placed in a - // single block). + // implements a set of traits, namely [`WeighData`], [`ClassifyDispatch`], and + // [`PaysFee`]. The first conveys the weight (a numeric representation of pure + // execution time and difficulty) of the transaction and the second demonstrates the + // [`DispatchClass`] of the call, the third gives whereas extrinsic must pay fees or not. + // A higher weight means a larger transaction (less of which can be placed in a single + // block). // // The weight for this extrinsic we rely on the auto-generated `WeightInfo` from the // benchmark toolchain. 
@@ -548,7 +548,8 @@ pub mod pallet { // Print out log or debug message in the console via log::{error, warn, info, debug, // trace}, accepting format strings similar to `println!`. - // https://substrate.dev/rustdocs/v3.0.0/log/index.html + // https://paritytech.github.io/substrate/master/sp_io/logging/fn.log.html + // https://paritytech.github.io/substrate/master/frame_support/constant.LOG_TARGET.html info!("New value is now: {:?}", new_value); // Put the new value into storage. diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index 1abbf50e6a4c4..44b4dbcf2bd30 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME executives engine" readme = "README.md" @@ -26,7 +26,7 @@ sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primiti sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } [dev-dependencies] -hex-literal = "0.3.1" +hex-literal = "0.3.3" sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } sp-io = { version = "4.0.0-dev", path = "../../primitives/io" } pallet-balances = { version = "4.0.0-dev", path = "../balances" } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 244253185d238..b1bdf357ec07d 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -575,15 +575,9 @@ where #[cfg(test)] mod tests { use super::*; - use frame_support::{ - assert_err, parameter_types, - traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons}, - weights::{IdentityFee, RuntimeDbWeight, Weight, WeightToFeePolynomial}, - }; - use frame_system::{Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; + use hex_literal::hex; - use pallet_balances::Call 
as BalancesCall; - use pallet_transaction_payment::CurrencyAdapter; + use sp_core::H256; use sp_runtime::{ generic::{DigestItem, Era}, @@ -594,95 +588,135 @@ mod tests { }, DispatchError, }; + + use frame_support::{ + assert_err, parameter_types, + traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons}, + weights::{IdentityFee, RuntimeDbWeight, Weight, WeightToFeePolynomial}, + }; + use frame_system::{Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; + use pallet_balances::Call as BalancesCall; + use pallet_transaction_payment::CurrencyAdapter; + const TEST_KEY: &[u8] = &*b":test:key:"; + #[frame_support::pallet] mod custom { - use frame_support::weights::{DispatchClass, Weight}; - use sp_runtime::transaction_validity::{ - TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, - }; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::config] pub trait Config: frame_system::Config {} - frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin { - #[weight = 100] - fn some_function(origin) { - // NOTE: does not make any different. - frame_system::ensure_signed(origin)?; - } - #[weight = (200, DispatchClass::Operational)] - fn some_root_operation(origin) { - frame_system::ensure_root(origin)?; - } - #[weight = 0] - fn some_unsigned_message(origin) { - frame_system::ensure_none(origin)?; - } + #[pallet::hooks] + impl Hooks> for Pallet { + // module hooks. 
+ // one with block number arg and one without + fn on_initialize(n: T::BlockNumber) -> Weight { + println!("on_initialize({})", n); + 175 + } - #[weight = 0] - fn allowed_unsigned(origin) { - frame_system::ensure_root(origin)?; - } + fn on_idle(n: T::BlockNumber, remaining_weight: Weight) -> Weight { + println!("on_idle{}, {})", n, remaining_weight); + 175 + } - #[weight = 0] - fn unallowed_unsigned(origin) { - frame_system::ensure_root(origin)?; - } + fn on_finalize(n: T::BlockNumber) { + println!("on_finalize({})", n); + } - #[weight = 0] - fn inherent_call(origin) { - let _ = frame_system::ensure_none(origin)?; - } + fn on_runtime_upgrade() -> Weight { + sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); + 200 + } - // module hooks. - // one with block number arg and one without - fn on_initialize(n: T::BlockNumber) -> Weight { - println!("on_initialize({})", n); - 175 - } + fn offchain_worker(n: T::BlockNumber) { + assert_eq!(T::BlockNumber::from(1u32), n); + } + } - fn on_idle(n: T::BlockNumber, remaining_weight: Weight) -> Weight { - println!("on_idle{}, {})", n, remaining_weight); - 175 - } + #[pallet::call] + impl Pallet { + #[pallet::weight(100)] + pub fn some_function(origin: OriginFor) -> DispatchResult { + // NOTE: does not make any different. 
+ frame_system::ensure_signed(origin)?; + Ok(()) + } - fn on_finalize() { - println!("on_finalize(?)"); - } + #[pallet::weight((200, DispatchClass::Operational))] + pub fn some_root_operation(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } - fn on_runtime_upgrade() -> Weight { - sp_io::storage::set(super::TEST_KEY, "module".as_bytes()); - 200 - } + #[pallet::weight(0)] + pub fn some_unsigned_message(origin: OriginFor) -> DispatchResult { + frame_system::ensure_none(origin)?; + Ok(()) + } - fn offchain_worker(n: T::BlockNumber) { - assert_eq!(T::BlockNumber::from(1u32), n); - } + #[pallet::weight(0)] + pub fn allowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } - #[weight = 0] - fn calculate_storage_root(_origin) { - let root = sp_io::storage::root(); - sp_io::storage::set("storage_root".as_bytes(), &root); - } + #[pallet::weight(0)] + pub fn unallowed_unsigned(origin: OriginFor) -> DispatchResult { + frame_system::ensure_root(origin)?; + Ok(()) + } + + #[pallet::weight(0)] + pub fn inherent_call(origin: OriginFor) -> DispatchResult { + let _ = frame_system::ensure_none(origin)?; + Ok(()) + } + + #[pallet::weight(0)] + pub fn calculate_storage_root(_origin: OriginFor) -> DispatchResult { + let root = sp_io::storage::root(); + sp_io::storage::set("storage_root".as_bytes(), &root); + Ok(()) } } - impl frame_support::inherent::ProvideInherent for Module { + #[pallet::inherent] + impl ProvideInherent for Pallet { type Call = Call; + type Error = sp_inherents::MakeFatalError<()>; + const INHERENT_IDENTIFIER: [u8; 8] = *b"test1234"; - fn create_inherent(_data: &sp_inherents::InherentData) -> Option { + + fn create_inherent(_data: &InherentData) -> Option { None } + fn is_inherent(call: &Self::Call) -> bool { *call == Call::::inherent_call {} } } - impl sp_runtime::traits::ValidateUnsigned for Module { + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { type Call 
= Call; + // Inherent call is accepted for being dispatched + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + match call { + Call::allowed_unsigned { .. } => Ok(()), + Call::inherent_call { .. } => Ok(()), + _ => Err(UnknownTransaction::NoUnsignedValidator.into()), + } + } + // Inherent call is not validated as unsigned fn validate_unsigned( _source: TransactionSource, @@ -693,15 +727,6 @@ mod tests { _ => UnknownTransaction::NoUnsignedValidator.into(), } } - - // Inherent call is accepted for being dispatched - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - match call { - Call::allowed_unsigned { .. } => Ok(()), - Call::inherent_call { .. } => Ok(()), - _ => Err(UnknownTransaction::NoUnsignedValidator.into()), - } - } } } @@ -775,10 +800,12 @@ mod tests { parameter_types! { pub const TransactionByteFee: Balance = 0; + pub const OperationalFeeMultiplier: u8 = 5; } impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = IdentityFee; type FeeMultiplierUpdate = (); } @@ -1085,8 +1112,6 @@ mod tests { let invalid = TestXt::new(Call::Custom(custom::Call::unallowed_unsigned {}), None); let mut t = new_test_ext(1); - let mut default_with_prio_3 = ValidTransaction::default(); - default_with_prio_3.priority = 3; t.execute_with(|| { assert_eq!( Executive::validate_transaction( @@ -1094,7 +1119,7 @@ mod tests { valid.clone(), Default::default(), ), - Ok(default_with_prio_3), + Ok(ValidTransaction::default()), ); assert_eq!( Executive::validate_transaction( diff --git a/frame/gilt/Cargo.toml b/frame/gilt/Cargo.toml index c275b693d8f27..c7dc384662f97 100644 --- a/frame/gilt/Cargo.toml +++ b/frame/gilt/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = 
"https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for rewarding account freezing." readme = "README.md" diff --git a/frame/gilt/src/benchmarking.rs b/frame/gilt/src/benchmarking.rs index 55d34a35a7ce4..9c6d22a48398d 100644 --- a/frame/gilt/src/benchmarking.rs +++ b/frame/gilt/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::{ dispatch::UnfilteredDispatchable, traits::{Currency, EnsureOrigin, Get}, @@ -50,17 +50,12 @@ benchmarks! { place_bid_max { let caller: T::AccountId = whitelisted_caller(); + let origin = RawOrigin::Signed(caller.clone()); T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); for i in 0..T::MaxQueueLen::get() { - Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + Gilt::::place_bid(origin.clone().into(), T::MinFreeze::get(), 1)?; } - }: { - Gilt::::place_bid( - RawOrigin::Signed(caller.clone()).into(), - T::MinFreeze::get() * BalanceOf::::from(2u32), - 1, - )? - } + }: place_bid(origin, T::MinFreeze::get() * BalanceOf::::from(2u32), 1) verify { assert_eq!(QueueTotals::::get()[0], ( T::MaxQueueLen::get(), @@ -81,9 +76,9 @@ benchmarks! { } set_target { - let call = Call::::set_target { target: Default::default() }; let origin = T::AdminOrigin::successful_origin(); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, Default::default()) + verify {} thaw { let caller: T::AccountId = whitelisted_caller(); @@ -131,6 +126,6 @@ benchmarks! 
{ .dispatch_bypass_filter(T::AdminOrigin::successful_origin())?; }: { Gilt::::pursue_target(q) } -} -impl_benchmark_test_suite!(Gilt, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Gilt, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs index de114e4bb87de..393b3acb41a36 100644 --- a/frame/gilt/src/lib.rs +++ b/frame/gilt/src/lib.rs @@ -128,7 +128,6 @@ pub mod pallet { /// The issuance to ignore. This is subtracted from the `Currency`'s `total_issuance` to get /// the issuance by which we inflate or deflate the gilt. - #[pallet::constant] type IgnoredIssuance: Get>; /// Number of duration queues in total. This sets the maximum duration supported, which is diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index 53ab443783e5d..36cc43fc3443e 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for GRANDPA finality gadget" readme = "README.md" diff --git a/frame/grandpa/src/benchmarking.rs b/frame/grandpa/src/benchmarking.rs index b0f70adb6061d..1e6be01ce8dbf 100644 --- a/frame/grandpa/src/benchmarking.rs +++ b/frame/grandpa/src/benchmarking.rs @@ -17,8 +17,6 @@ //! Benchmarks for the GRANDPA pallet. -#![cfg_attr(not(feature = "std"), no_std)] - use super::{Pallet as Grandpa, *}; use frame_benchmarking::benchmarks; use frame_system::RawOrigin; @@ -70,6 +68,12 @@ benchmarks! 
{ verify { assert!(Grandpa::::stalled().is_some()); } + + impl_benchmark_test_suite!( + Pallet, + crate::mock::new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), + crate::mock::Test, + ); } #[cfg(test)] @@ -77,12 +81,6 @@ mod tests { use super::*; use crate::mock::*; - frame_benchmarking::impl_benchmark_test_suite!( - Pallet, - crate::mock::new_test_ext(vec![(1, 1), (2, 1), (3, 1)]), - crate::mock::Test, - ); - #[test] fn test_generate_equivocation_report_blob() { let authorities = crate::tests::test_authorities(); diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index cd75deea770b4..9f6967a7d3c85 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -33,7 +33,7 @@ pub use sp_finality_grandpa as fg_primitives; use sp_std::prelude::*; -use codec::{self as codec, Decode, Encode}; +use codec::{self as codec, Decode, Encode, MaxEncodedLen}; pub use fg_primitives::{AuthorityId, AuthorityList, AuthorityWeight, VersionedAuthorityList}; use fg_primitives::{ ConsensusLog, EquivocationProof, ScheduledChange, SetId, GRANDPA_AUTHORITIES_KEY, @@ -41,9 +41,11 @@ use fg_primitives::{ }; use frame_support::{ dispatch::DispatchResultWithPostInfo, + pallet_prelude::Get, storage, traits::{KeyOwnerProofSystem, OneSessionHandler, StorageVersion}, weights::{Pays, Weight}, + WeakBoundedVec, }; use sp_runtime::{generic::DigestItem, traits::Zero, DispatchResult, KeyTypeId}; use sp_session::{GetSessionNumber, GetValidatorCount}; @@ -81,6 +83,7 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::storage_version(STORAGE_VERSION)] + #[pallet::generate_storage_info] pub struct Pallet(_); #[pallet::config] @@ -119,6 +122,10 @@ pub mod pallet { /// Weights for this pallet. 
type WeightInfo: WeightInfo; + + /// Max Authorities in use + #[pallet::constant] + type MaxAuthorities: Get; } #[pallet::hooks] @@ -133,13 +140,13 @@ pub mod pallet { median, ScheduledChange { delay: pending_change.delay, - next_authorities: pending_change.next_authorities.clone(), + next_authorities: pending_change.next_authorities.to_vec(), }, )) } else { Self::deposit_log(ConsensusLog::ScheduledChange(ScheduledChange { delay: pending_change.delay, - next_authorities: pending_change.next_authorities.clone(), + next_authorities: pending_change.next_authorities.to_vec(), })); } } @@ -147,7 +154,9 @@ pub mod pallet { // enact the change if we've reached the enacting block if block_number == pending_change.scheduled_at + pending_change.delay { Self::set_grandpa_authorities(&pending_change.next_authorities); - Self::deposit_event(Event::NewAuthorities(pending_change.next_authorities)); + Self::deposit_event(Event::NewAuthorities( + pending_change.next_authorities.to_vec(), + )); >::kill(); } } @@ -291,7 +300,8 @@ pub mod pallet { /// Pending change: (signaled at, scheduled change). #[pallet::storage] #[pallet::getter(fn pending_change)] - pub(super) type PendingChange = StorageValue<_, StoredPendingChange>; + pub(super) type PendingChange = + StorageValue<_, StoredPendingChange>; /// next block number where we can force a change. #[pallet::storage] @@ -355,15 +365,25 @@ pub trait WeightInfo { fn note_stalled() -> Weight; } +/// Bounded version of `AuthorityList`, `Limit` being the bound +pub type BoundedAuthorityList = WeakBoundedVec<(AuthorityId, AuthorityWeight), Limit>; + /// A stored pending change. -#[derive(Encode, Decode, TypeInfo)] -pub struct StoredPendingChange { +/// `Limit` is the bound for `next_authorities` +#[derive(Encode, Decode, TypeInfo, MaxEncodedLen)] +#[codec(mel_bound(Limit: Get))] +#[scale_info(skip_type_params(Limit))] +pub struct StoredPendingChange +where + Limit: Get, + N: MaxEncodedLen, +{ /// The block number this was scheduled at. 
pub scheduled_at: N, /// The delay in blocks until it will be applied. pub delay: N, - /// The next authority set. - pub next_authorities: AuthorityList, + /// The next authority set, weakly bounded in size by `Limit`. + pub next_authorities: BoundedAuthorityList, /// If defined it means the change was forced and the given block number /// indicates the median last finalized block when the change was signaled. pub forced: Option, @@ -372,7 +392,7 @@ pub struct StoredPendingChange { /// Current state of the GRANDPA authority set. State transitions must happen in /// the same order of states defined below, e.g. `Paused` implies a prior /// `PendingPause`. -#[derive(Decode, Encode, TypeInfo)] +#[derive(Decode, Encode, TypeInfo, MaxEncodedLen)] #[cfg_attr(test, derive(Debug, PartialEq))] pub enum StoredState { /// The current authority set is live, and GRANDPA is enabled. @@ -465,6 +485,14 @@ impl Pallet { >::put(scheduled_at + in_blocks * 2u32.into()); } + let next_authorities = WeakBoundedVec::<_, T::MaxAuthorities>::force_from( + next_authorities, + Some( + "Warning: The number of authorities given is too big. \ + A runtime configuration adjustment may be needed.", + ), + ); + >::put(StoredPendingChange { delay: in_blocks, scheduled_at, @@ -646,7 +674,7 @@ where SetIdSession::::insert(current_set_id, &session_index); } - fn on_disabled(i: usize) { + fn on_disabled(i: u32) { Self::deposit_log(ConsensusLog::OnDisabled(i as u64)) } } diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 2f1b2630b2241..f1996553f02eb 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -111,7 +111,6 @@ where parameter_types! { pub const Period: u64 = 1; pub const Offset: u64 = 0; - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } /// Custom `SessionHandler` since we use `TestSessionKeys` as `Keys`. 
@@ -124,7 +123,6 @@ impl pallet_session::Config for Test { type SessionManager = pallet_session::historical::NoteHistoricalRoot; type SessionHandler = ::KeyTypeIdProviders; type Keys = TestSessionKeys; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type WeightInfo = (); } @@ -191,6 +189,7 @@ parameter_types! { pub const MaxNominatorRewardedPerValidator: u32 = 64; pub const ElectionLookahead: u64 = 0; pub const StakingUnsignedPriority: u64 = u64::MAX / 2; + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); } impl onchain::Config for Test { @@ -214,6 +213,7 @@ impl pallet_staking::Config for Test { type UnixTime = pallet_timestamp::Pallet; type EraPayout = pallet_staking::ConvertCurve; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type NextNewSession = Session; type ElectionProvider = onchain::OnChainSequentialPhragmen; type GenesisElectionProvider = Self::ElectionProvider; @@ -230,6 +230,7 @@ impl pallet_offences::Config for Test { parameter_types! 
{ pub const ReportLongevity: u64 = BondingDuration::get() as u64 * SessionsPerEra::get() as u64 * Period::get(); + pub const MaxAuthorities: u32 = 100; } impl Config for Test { @@ -250,6 +251,7 @@ impl Config for Test { super::EquivocationHandler; type WeightInfo = (); + type MaxAuthorities = MaxAuthorities; } pub fn grandpa_log(log: ConsensusLog) -> DigestItem { diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 598be25c5ef38..e6d1fa3e9dfbc 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME identity management pallet" readme = "README.md" diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 8bda24ddc73e1..68869a43992f9 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use crate::Pallet as Identity; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::{ensure, traits::Get}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -411,6 +411,5 @@ benchmarks! 
{ ensure!(!SuperOf::::contains_key(&caller), "Sub not removed"); } + impl_benchmark_test_suite!(Identity, crate::tests::new_test_ext(), crate::tests::Test); } - -impl_benchmark_test_suite!(Identity, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index a1efd626c0690..ef2fe54a8ceef 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME's I'm online pallet" readme = "README.md" diff --git a/frame/im-online/src/benchmarking.rs b/frame/im-online/src/benchmarking.rs index 20812f03d28dd..012da53a183e5 100644 --- a/frame/im-online/src/benchmarking.rs +++ b/frame/im-online/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::benchmarks; use frame_support::{traits::UnfilteredDispatchable, WeakBoundedVec}; use frame_system::RawOrigin; use sp_core::{offchain::OpaqueMultiaddr, OpaquePeerId}; @@ -93,11 +93,13 @@ benchmarks! { let e in 1 .. 
MAX_EXTERNAL_ADDRESSES; let (input_heartbeat, signature) = create_heartbeat::(k, e)?; let call = Call::heartbeat { heartbeat: input_heartbeat, signature }; + let call_enc = call.encode(); }: { - ImOnline::::validate_unsigned(TransactionSource::InBlock, &call) - .map_err(<&str>::from)?; - call.dispatch_bypass_filter(RawOrigin::None.into())?; + ImOnline::::validate_unsigned(TransactionSource::InBlock, &call).map_err(<&str>::from)?; + as Decode>::decode(&mut &*call_enc) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(RawOrigin::None.into())?; } -} -impl_benchmark_test_suite!(ImOnline, crate::mock::new_test_ext(), crate::mock::Runtime); + impl_benchmark_test_suite!(ImOnline, crate::mock::new_test_ext(), crate::mock::Runtime); +} diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 2fcaed1820ff9..d76bbaaa2fd14 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -412,7 +412,7 @@ pub mod pallet { pub(crate) type Keys = StorageValue<_, WeakBoundedVec, ValueQuery>; - /// For each session index, we keep a mapping of 'SessionIndex` and `AuthIndex` to + /// For each session index, we keep a mapping of `SessionIndex` and `AuthIndex` to /// `WrapperOpaque`. #[pallet::storage] #[pallet::getter(fn received_heartbeats)] @@ -919,7 +919,7 @@ impl OneSessionHandler for Pallet { } } - fn on_disabled(_i: usize) { + fn on_disabled(_i: u32) { // ignore } } diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 92d1fe8e3f8b9..1e4d4b43d5789 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -27,7 +27,7 @@ use sp_core::H256; use sp_runtime::{ testing::{Header, TestXt, UintAuthorityId}, traits::{BlakeTwo256, ConvertInto, IdentityLookup}, - Perbill, Permill, + Permill, }; use sp_staking::{ offence::{OffenceError, ReportOffence}, @@ -146,10 +146,6 @@ parameter_types! { pub const Offset: u64 = 0; } -parameter_types! 
{ - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); -} - impl pallet_session::Config for Runtime { type ShouldEndSession = pallet_session::PeriodicSessions; type SessionManager = @@ -159,7 +155,6 @@ impl pallet_session::Config for Runtime { type ValidatorIdOf = ConvertInto; type Keys = UintAuthorityId; type Event = Event; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type NextSessionRotation = pallet_session::PeriodicSessions; type WeightInfo = (); } diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index 17d04c43fa5d9..bf5a82fbb1da7 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME indices management pallet" readme = "README.md" diff --git a/frame/indices/src/benchmarking.rs b/frame/indices/src/benchmarking.rs index ba0152008c41e..873dc18b20265 100644 --- a/frame/indices/src/benchmarking.rs +++ b/frame/indices/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -91,6 +91,6 @@ benchmarks! 
{ } // TODO in another PR: lookup and unlookup trait weights (not critical) -} -impl_benchmark_test_suite!(Indices, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Indices, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/lottery/Cargo.toml b/frame/lottery/Cargo.toml index f14d65310cc70..854b6f52470d7 100644 --- a/frame/lottery/Cargo.toml +++ b/frame/lottery/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME Participation Lottery Pallet" readme = "README.md" diff --git a/frame/lottery/src/benchmarking.rs b/frame/lottery/src/benchmarking.rs index 3b7035c72deb0..5407e16cd633f 100644 --- a/frame/lottery/src/benchmarking.rs +++ b/frame/lottery/src/benchmarking.rs @@ -21,8 +21,8 @@ use super::*; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; -use frame_support::traits::{EnsureOrigin, OnInitialize, UnfilteredDispatchable}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; +use frame_support::traits::{EnsureOrigin, OnInitialize}; use frame_system::RawOrigin; use sp_runtime::traits::{Bounded, Zero}; @@ -73,11 +73,9 @@ benchmarks! { set_calls { let n in 0 .. T::MaxCalls::get() as u32; let calls = vec![frame_system::Call::::remark { remark: vec![] }.into(); n as usize]; - - let call = Call::::set_calls { calls }; let origin = T::ManagerOrigin::successful_origin(); assert!(CallIndices::::get().is_empty()); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, calls) verify { if !n.is_zero() { assert!(!CallIndices::::get().is_empty()); @@ -88,10 +86,8 @@ benchmarks! 
{ let price = BalanceOf::::max_value(); let end = 10u32.into(); let payout = 5u32.into(); - - let call = Call::::start_lottery { price, length: end, delay: payout, repeat: true }; let origin = T::ManagerOrigin::successful_origin(); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin, price, end, payout, true) verify { assert!(crate::Lottery::::get().is_some()); } @@ -99,9 +95,8 @@ benchmarks! { stop_repeat { setup_lottery::(true)?; assert_eq!(crate::Lottery::::get().unwrap().repeat, true); - let call = Call::::stop_repeat {}; let origin = T::ManagerOrigin::successful_origin(); - }: { call.dispatch_bypass_filter(origin)? } + }: _(origin) verify { assert_eq!(crate::Lottery::::get().unwrap().repeat, false); } @@ -168,6 +163,6 @@ benchmarks! { assert_eq!(Lottery::::pot().1, 0u32.into()); assert!(!T::Currency::free_balance(&winner).is_zero()) } -} -impl_benchmark_test_suite!(Lottery, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Lottery, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index acc82f7678de6..9fdfaa4731729 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME membership management pallet" readme = "README.md" diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 57a12c7c8a453..8fa2abb0ad3f3 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -357,9 +357,7 @@ impl, I: 'static> SortedMembers for Pallet { #[cfg(feature = "runtime-benchmarks")] mod benchmark { use super::{Pallet as Membership, *}; - use frame_benchmarking::{ - account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelist, - }; + use 
frame_benchmarking::{account, benchmarks_instance_pallet, whitelist}; use frame_support::{assert_ok, traits::EnsureOrigin}; use frame_system::RawOrigin; @@ -494,9 +492,9 @@ mod benchmark { assert!(::get_prime().is_none()); #[cfg(test)] crate::tests::clean(); } - } - impl_benchmark_test_suite!(Membership, crate::tests::new_bench_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Membership, crate::tests::new_bench_ext(), crate::tests::Test); + } } #[cfg(test)] diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index 02b4be182ef82..942067ebde3ed 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME Merkle Mountain Range pallet." diff --git a/frame/merkle-mountain-range/primitives/Cargo.toml b/frame/merkle-mountain-range/primitives/Cargo.toml index 07b2f8ae3a3a4..bbf55a2b7089e 100644 --- a/frame/merkle-mountain-range/primitives/Cargo.toml +++ b/frame/merkle-mountain-range/primitives/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME Merkle Mountain Range primitives." 
diff --git a/frame/merkle-mountain-range/rpc/Cargo.toml b/frame/merkle-mountain-range/rpc/Cargo.toml index 5a0f114e50173..926cfd602f673 100644 --- a/frame/merkle-mountain-range/rpc/Cargo.toml +++ b/frame/merkle-mountain-range/rpc/Cargo.toml @@ -4,7 +4,7 @@ version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Node-specific RPC methods for interaction with Merkle Mountain Range pallet." publish = false diff --git a/frame/merkle-mountain-range/src/benchmarking.rs b/frame/merkle-mountain-range/src/benchmarking.rs index 2680b3d030067..d6ef76d01ac3a 100644 --- a/frame/merkle-mountain-range/src/benchmarking.rs +++ b/frame/merkle-mountain-range/src/benchmarking.rs @@ -17,10 +17,8 @@ //! Benchmarks for the MMR pallet. -#![cfg_attr(not(feature = "std"), no_std)] - use crate::*; -use frame_benchmarking::{benchmarks_instance_pallet, impl_benchmark_test_suite}; +use frame_benchmarking::benchmarks_instance_pallet; use frame_support::traits::OnInitialize; benchmarks_instance_pallet! { @@ -35,6 +33,6 @@ benchmarks_instance_pallet! 
{ } verify { assert_eq!(crate::NumberOfLeaves::::get(), leaves); } -} -impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::mock::Test); +} diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index 177334d4ccf8d..d1bd23dcab581 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME multi-signature dispatch pallet" readme = "README.md" diff --git a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index 2e23dff156e07..edfeba253e5f0 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use core::convert::TryInto; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::{account, benchmarks}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -297,6 +297,6 @@ benchmarks! 
{ assert!(!Multisigs::::contains_key(multi_account_id, call_hash)); assert!(!Calls::::contains_key(call_hash)); } -} -impl_benchmark_test_suite!(Multisig, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Multisig, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 431ee2c84157c..dacec5567ede4 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for nick management" readme = "README.md" diff --git a/frame/node-authorization/Cargo.toml b/frame/node-authorization/Cargo.toml index 635e72e3a8b8a..960f7e04688dd 100644 --- a/frame/node-authorization/Cargo.toml +++ b/frame/node-authorization/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for node authorization" diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index 8fdcbf46fa3e1..97f4644a83ca0 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME offences pallet" readme = "README.md" diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index b21e6cf9b7e13..f8459087cb7fb 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -4,7 +4,7 @@ version = 
"4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME offences pallet benchmarking" readme = "README.md" diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 35e3c1aec9403..c920b0b900dff 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -23,7 +23,7 @@ mod mock; use sp_std::{prelude::*, vec}; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::{account, benchmarks}; use frame_support::traits::{Currency, ValidatorSet, ValidatorSetWithIdentification}; use frame_system::{Config as SystemConfig, Pallet as System, RawOrigin}; @@ -224,27 +224,49 @@ fn check_events::Event>>(expec .map(|frame_system::EventRecord { event, .. }| event) .collect::>(); let expected = expected.collect::>(); - let lengths = (events.len(), expected.len()); - let length_mismatch = if lengths.0 != lengths.1 { - fn pretty(header: &str, ev: &[D]) { - println!("{}", header); - for (idx, ev) in ev.iter().enumerate() { - println!("\t[{:04}] {:?}", idx, ev); - } + + fn pretty(header: &str, ev: &[D], offset: usize) { + println!("{}", header); + for (idx, ev) in ev.iter().enumerate() { + println!("\t[{:04}] {:?}", idx + offset, ev); } - pretty("--Got:", &events); - pretty("--Expected:", &expected); - format!("Mismatching length. 
Got: {}, expected: {}", lengths.0, lengths.1) - } else { - Default::default() - }; + } + fn print_events(idx: usize, events: &[D], expected: &[D]) { + let window = 10; + let start = idx.saturating_sub(window / 2); + let end_got = (idx + window / 2).min(events.len()); + pretty("Got(window):", &events[start..end_got], start); + let end_expected = (idx + window / 2).min(expected.len()); + pretty("Expected(window):", &expected[start..end_expected], start); + println!("---------------"); + let start_got = events.len().saturating_sub(window); + pretty("Got(end):", &events[start_got..], start_got); + let start_expected = expected.len().saturating_sub(window); + pretty("Expected(end):", &expected[start_expected..], start_expected); + } + let events_copy = events.clone(); + let expected_copy = expected.clone(); for (idx, (a, b)) in events.into_iter().zip(expected).enumerate() { - assert_eq!(a, b, "Mismatch at: {}. {}", idx, length_mismatch); + if a != b { + print_events(idx, &events_copy, &expected_copy); + println!("Mismatch at: {}", idx); + println!(" Got: {:?}", b); + println!("Expected: {:?}", a); + if events_copy.len() != expected_copy.len() { + println!( + "Mismatching lengths. Got: {}, Expected: {}", + events_copy.len(), + expected_copy.len() + ) + } + panic!("Mismatching events."); + } } - if !length_mismatch.is_empty() { - panic!("{}", length_mismatch); + if events_copy.len() != expected_copy.len() { + print_events(0, &events_copy, &expected_copy); + panic!("Mismatching lengths. Got: {}, Expected: {}", events_copy.len(), expected_copy.len()) } } @@ -288,46 +310,74 @@ benchmarks! 
{ let bond_amount: u32 = UniqueSaturatedInto::::unique_saturated_into(bond_amount::()); let slash_amount = slash_fraction * bond_amount; let reward_amount = slash_amount * (1 + n) / 2; + let reward = reward_amount / r; let slash = |id| core::iter::once( ::Event::from(StakingEvent::::Slashed(id, BalanceOf::::from(slash_amount))) ); + let balance_slash = |id| core::iter::once( + ::Event::from(pallet_balances::Event::::Slashed(id, slash_amount.into())) + ); let chill = |id| core::iter::once( ::Event::from(StakingEvent::::Chilled(id)) ); - let mut slash_events = raw_offenders.into_iter() + let balance_deposit = |id, amount: u32| + ::Event::from(pallet_balances::Event::::Deposit(id, amount.into())); + let mut first = true; + let slash_events = raw_offenders.into_iter() .flat_map(|offender| { - let nom_slashes = offender.nominator_stashes.into_iter().flat_map(|nom| slash(nom)); - chill(offender.stash.clone()) - .chain(slash(offender.stash)) - .chain(nom_slashes) + let nom_slashes = offender.nominator_stashes.into_iter().flat_map(|nom| { + balance_slash(nom.clone()).map(Into::into) + .chain(slash(nom.clone()).map(Into::into)) + }).collect::>(); + + let mut events = chill(offender.stash.clone()).map(Into::into) + .chain(balance_slash(offender.stash.clone()).map(Into::into)) + .chain(slash(offender.stash.clone()).map(Into::into)) + .chain(nom_slashes.into_iter()) + .collect::>(); + + // the first deposit creates endowed events, see `endowed_reward_events` + if first { + first = false; + let mut reward_events = reporters.clone().into_iter() + .flat_map(|reporter| vec![ + balance_deposit(reporter.clone(), reward.into()).into(), + frame_system::Event::::NewAccount(reporter.clone()).into(), + ::Event::from( + pallet_balances::Event::::Endowed(reporter.clone(), reward.into()) + ).into(), + ]) + .collect::>(); + events.append(&mut reward_events); + events.into_iter() + } else { + let mut reward_events = reporters.clone().into_iter() + .map(|reporter| balance_deposit(reporter, 
reward.into()).into()) + .collect::>(); + events.append(&mut reward_events); + events.into_iter() + } }) .collect::>(); - let reward_events = reporters.into_iter() - .flat_map(|reporter| vec![ - frame_system::Event::::NewAccount(reporter.clone()).into(), - ::Event::from( - pallet_balances::Event::::Endowed(reporter, (reward_amount / r).into()) - ).into() - ]); - - // Rewards are applied after first offender and it's nominators. - // We split after: offender slash + offender chill + nominator slashes. - let slash_rest = slash_events.split_off(2 + n as usize); - // make sure that all slashes have been applied + + #[cfg(test)] - check_events::( - std::iter::empty() - .chain(slash_events.into_iter().map(Into::into)) - .chain(reward_events) - .chain(slash_rest.into_iter().map(Into::into)) - .chain(std::iter::once(::Event::from( - pallet_offences::Event::Offence( - UnresponsivenessOffence::::ID, - 0_u32.to_le_bytes().to_vec(), - ) - ).into())) - ); + { + // In case of error it's useful to see the inputs + println!("Inputs: r: {}, o: {}, n: {}", r, o, n); + // make sure that all slashes have been applied + check_events::( + std::iter::empty() + .chain(slash_events.into_iter().map(Into::into)) + .chain(std::iter::once(::Event::from( + pallet_offences::Event::Offence( + UnresponsivenessOffence::::ID, + 0_u32.to_le_bytes().to_vec(), + ) + ).into())) + ); + } } report_offence_grandpa { @@ -358,10 +408,10 @@ benchmarks! { assert_eq!( System::::event_count(), 0 + 1 // offence - + 2 // reporter (reward + endowment) - + 1 // offenders slashed + + 3 // reporter (reward + endowment) + + 2 // offenders slashed + 1 // offenders chilled - + n // nominators slashed + + 2 * n // nominators slashed ); } @@ -393,12 +443,12 @@ benchmarks! 
{ assert_eq!( System::::event_count(), 0 + 1 // offence - + 2 // reporter (reward + endowment) - + 1 // offenders slashed + + 3 // reporter (reward + endowment) + + 2 // offenders slashed + 1 // offenders chilled - + n // nominators slashed + + 2 * n // nominators slashed ); } -} -impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index 6973e25371d4f..3097f9b95be3f 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -112,7 +112,7 @@ impl pallet_session::SessionHandler for TestSessionHandler { ) { } - fn on_disabled(_: usize) {} + fn on_disabled(_: u32) {} } parameter_types! { @@ -129,7 +129,6 @@ impl pallet_session::Config for Test { type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; - type DisabledValidatorsThreshold = (); type WeightInfo = (); } @@ -175,6 +174,7 @@ impl pallet_staking::Config for Test { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainSequentialPhragmen; type GenesisElectionProvider = Self::ElectionProvider; type SortedListProvider = pallet_staking::UseNominatorsMap; diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 83db82990d105..4f4cf2bf9d56d 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME proxying pallet" readme = "README.md" @@ -42,5 +42,6 @@ std = [ 
runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index e66f6782c19e1..1eb3ec5770544 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use crate::Pallet as Proxy; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_system::RawOrigin; use sp_runtime::traits::Bounded; @@ -245,6 +245,6 @@ benchmarks! { verify { assert!(!Proxies::::contains_key(&anon)); } -} -impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Proxy, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index 5e8eb6b082879..ba77312699172 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME randomness collective flip pallet" readme = "README.md" diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 40a89e9b59f89..092940f5173f8 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME account recovery pallet" readme = "README.md" diff --git a/frame/scheduler/Cargo.toml 
b/frame/scheduler/Cargo.toml index 62b21fe04c9df..862321dfa6f26 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME example pallet" readme = "README.md" diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index 2c164eaede229..1065f17027744 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::benchmarks; use frame_support::{ensure, traits::OnInitialize}; use frame_system::RawOrigin; use sp_std::{prelude::*, vec}; @@ -139,6 +139,6 @@ benchmarks! { "didn't append schedule" ); } -} -impl_benchmark_test_suite!(Scheduler, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Scheduler, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index 9d5f156c175d5..a7d75ccacb96e 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for scored pools" readme = "README.md" diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 8f07de2e7a6db..32d298d3917c3 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" 
+homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME sessions pallet" readme = "README.md" @@ -13,38 +13,40 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = [ - "derive", -] } +log = { version = "0.4.0", default-features = false } +impl-trait-for-tuples = "0.2.1" + +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } scale-info = { version = "1.0", default-features = false, features = ["derive"] } -sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } + sp-std = { version = "4.0.0-dev", default-features = false, path = "../../primitives/std" } +sp-core = { version = "4.0.0-dev", default-features = false, path = "../../primitives/core" } sp-io = { version = "4.0.0-dev", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../primitives/runtime" } sp-session = { version = "4.0.0-dev", default-features = false, path = "../../primitives/session" } sp-staking = { version = "4.0.0-dev", default-features = false, path = "../../primitives/staking" } +sp-trie = { version = "4.0.0-dev", default-features = false, path = "../../primitives/trie", optional = true } + frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } frame-system = { version = "4.0.0-dev", default-features = false, path = "../system" } pallet-timestamp = { version = "4.0.0-dev", default-features = false, path = "../timestamp" } -sp-trie = { version = "4.0.0-dev", optional = true, default-features = false, path = "../../primitives/trie" } -log = { version = "0.4.0", default-features = false } -impl-trait-for-tuples = "0.2.1" [features] default = ["std", "historical"] historical = ["sp-trie"] std = [ + "log/std", 
"codec/std", "scale-info/std", "sp-std/std", - "sp-io/std", - "frame-support/std", "sp-core/std", + "sp-io/std", "sp-runtime/std", "sp-session/std", "sp-staking/std", - "pallet-timestamp/std", "sp-trie/std", - "log/std", + "frame-support/std", + "frame-system/std", + "pallet-timestamp/std", ] try-runtime = ["frame-support/try-runtime"] diff --git a/frame/session/README.md b/frame/session/README.md index c47b5610de09c..09132470d4433 100644 --- a/frame/session/README.md +++ b/frame/session/README.md @@ -1,11 +1,11 @@ -# Session Module +# Session Pallet The Session module allows validators to manage their session keys, provides a function for changing the session length, and handles session rotation. - [`session::Trait`](https://docs.rs/pallet-session/latest/pallet_session/trait.Config.html) - [`Call`](https://docs.rs/pallet-session/latest/pallet_session/enum.Call.html) -- [`Module`](https://docs.rs/pallet-session/latest/pallet_session/struct.Module.html) +- [`Pallet`](https://docs.rs/pallet-session/latest/pallet_session/struct.Pallet.html) ## Overview @@ -72,11 +72,11 @@ The [Staking pallet](https://docs.rs/pallet-staking/latest/pallet_staking/) uses use pallet_session as session; fn validators() -> Vec<::ValidatorId> { - >::validators() + >::validators() } ``` -## Related Modules +## Related Pallets - [Staking](https://docs.rs/pallet-staking/latest/pallet_staking/) diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index cc242085bf5e4..0d0868d439215 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME sessions pallet benchmarking" readme = "README.md" @@ -13,39 +13,37 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] 
[dependencies] -sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } -sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } +rand = { version = "0.7.2", default-features = false } + sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } -frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } +sp-runtime = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/session" } + frame-benchmarking = { version = "4.0.0-dev", default-features = false, path = "../../benchmarking" } frame-support = { version = "4.0.0-dev", default-features = false, path = "../../support" } -pallet-staking = { version = "4.0.0-dev", default-features = false, features = [ - "runtime-benchmarks", -], path = "../../staking" } +frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } pallet-session = { version = "4.0.0-dev", default-features = false, path = "../../session" } -rand = { version = "0.7.2", default-features = false } +pallet-staking = { version = "4.0.0-dev", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } [dev-dependencies] -codec = { package = "parity-scale-codec", version = "2.0.0", features = [ - "derive", -] } +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } scale-info = "1.0" sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } -pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../../staking/reward-curve" } sp-io = { version = "4.0.0-dev", path = "../../../primitives/io" } -pallet-timestamp = { version = "4.0.0-dev", path = "../../timestamp" } pallet-balances = { version = "4.0.0-dev", path = "../../balances" } +pallet-timestamp = { version = 
"4.0.0-dev", path = "../../timestamp" } +pallet-staking-reward-curve = { version = "4.0.0-dev", path = "../../staking/reward-curve" } frame-election-provider-support = { version = "4.0.0-dev", path = "../../election-provider-support" } [features] default = ["std"] std = [ "sp-std/std", - "sp-session/std", "sp-runtime/std", - "frame-system/std", + "sp-session/std", "frame-benchmarking/std", "frame-support/std", - "pallet-staking/std", + "frame-system/std", "pallet-session/std", + "pallet-staking/std", ] diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 8b84145c1acfd..8ca713b1bbf61 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -24,13 +24,13 @@ mod mock; use sp_std::{prelude::*, vec}; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; +use frame_benchmarking::benchmarks; use frame_support::{ codec::Decode, traits::{KeyOwnerProofSystem, OnInitialize}, }; use frame_system::RawOrigin; -use pallet_session::{historical::Module as Historical, Module as Session, *}; +use pallet_session::{historical::Module as Historical, Pallet as Session, *}; use pallet_staking::{ benchmarking::create_validator_with_nominators, testing_utils::create_validators, RewardDestination, @@ -39,7 +39,7 @@ use sp_runtime::traits::{One, StaticLookup}; const MAX_VALIDATORS: u32 = 1000; -pub struct Pallet(pallet_session::Module); +pub struct Pallet(pallet_session::Pallet); pub trait Config: pallet_session::Config + pallet_session::historical::Config + pallet_staking::Config { @@ -47,7 +47,7 @@ pub trait Config: impl OnInitialize for Pallet { fn on_initialize(n: T::BlockNumber) -> frame_support::weights::Weight { - pallet_session::Module::::on_initialize(n) + pallet_session::Pallet::::on_initialize(n) } } @@ -115,6 +115,8 @@ benchmarks! 
{ verify { assert!(Historical::::check_proof(key, key_owner_proof2).is_some()); } + + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); } /// Sets up the benchmark for checking a membership proof. It creates the given @@ -161,5 +163,3 @@ fn check_membership_proof_setup( (key, Historical::::prove(key).unwrap()) } - -impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test, extra = false); diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 4d3a1a2d8689d..f534cc097e8a0 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -117,7 +117,7 @@ impl pallet_session::SessionHandler for TestSessionHandler { ) { } - fn on_disabled(_: usize) {} + fn on_disabled(_: u32) {} } impl pallet_session::Config for Test { @@ -129,7 +129,6 @@ impl pallet_session::Config for Test { type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = pallet_staking::StashOf; - type DisabledValidatorsThreshold = (); type WeightInfo = (); } pallet_staking_reward_curve::build! { @@ -180,6 +179,7 @@ impl pallet_staking::Config for Test { type EraPayout = pallet_staking::ConvertCurve; type NextNewSession = Session; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type OffendingValidatorsThreshold = (); type ElectionProvider = onchain::OnChainSequentialPhragmen; type GenesisElectionProvider = Self::ElectionProvider; type SortedListProvider = pallet_staking::UseNominatorsMap; diff --git a/frame/session/src/historical/mod.rs b/frame/session/src/historical/mod.rs index c9b13e3c7f262..0801b2aca1701 100644 --- a/frame/session/src/historical/mod.rs +++ b/frame/session/src/historical/mod.rs @@ -26,7 +26,7 @@ //! These roots and proofs of inclusion can be generated at any time during the current session. //! Afterwards, the proofs can be fed to a consensus module when reporting misbehavior. 
-use super::{Module as SessionModule, SessionIndex}; +use super::{Pallet as SessionModule, SessionIndex}; use codec::{Decode, Encode}; use frame_support::{ decl_module, decl_storage, print, @@ -114,11 +114,11 @@ impl ValidatorSet for Module { type ValidatorIdOf = T::ValidatorIdOf; fn session_index() -> sp_staking::SessionIndex { - super::Module::::current_index() + super::Pallet::::current_index() } fn validators() -> Vec { - super::Module::::validators() + super::Pallet::::validators() } } @@ -366,11 +366,13 @@ pub(crate) mod tests { use crate::mock::{ force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS, }; + + use sp_runtime::{key_types::DUMMY, testing::UintAuthorityId}; + use frame_support::{ - traits::{KeyOwnerProofSystem, OnInitialize}, + traits::{GenesisBuild, KeyOwnerProofSystem, OnInitialize}, BasicExternalities, }; - use sp_runtime::{key_types::DUMMY, testing::UintAuthorityId}; type Historical = Module; diff --git a/frame/session/src/historical/offchain.rs b/frame/session/src/historical/offchain.rs index 8583c2bb439be..b646ecc2764f7 100644 --- a/frame/session/src/historical/offchain.rs +++ b/frame/session/src/historical/offchain.rs @@ -30,15 +30,11 @@ use sp_runtime::{ KeyTypeId, }; use sp_session::MembershipProof; - -use super::{ - super::{Pallet as SessionModule, SessionIndex}, - Config, IdentificationTuple, ProvingTrie, -}; - -use super::shared; use sp_std::prelude::*; +use super::{shared, Config, IdentificationTuple, ProvingTrie}; +use crate::{Pallet as SessionModule, SessionIndex}; + /// A set of validators, which was used for a fixed session index. 
struct ValidatorSet { validator_set: Vec>, @@ -142,23 +138,24 @@ pub fn keep_newest(n_to_keep: usize) { #[cfg(test)] mod tests { - use super::{ - super::{onchain, Module}, - *, - }; - use crate::mock::{ - force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS, + use super::*; + use crate::{ + historical::{onchain, Module}, + mock::{force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS}, }; + use codec::Encode; - use frame_support::traits::{KeyOwnerProofSystem, OnInitialize}; use sp_core::{ crypto::key_types::DUMMY, offchain::{testing::TestOffchainExt, OffchainDbExt, OffchainWorkerExt, StorageKind}, }; - - use frame_support::BasicExternalities; use sp_runtime::testing::UintAuthorityId; + use frame_support::{ + traits::{GenesisBuild, KeyOwnerProofSystem, OnInitialize}, + BasicExternalities, + }; + type Historical = Module; pub fn new_test_ext() -> sp_io::TestExternalities { diff --git a/frame/session/src/historical/onchain.rs b/frame/session/src/historical/onchain.rs index 514e343f4e0f6..c80817c28d723 100644 --- a/frame/session/src/historical/onchain.rs +++ b/frame/session/src/historical/onchain.rs @@ -19,15 +19,11 @@ use codec::Encode; use sp_runtime::traits::Convert; - -use super::{ - super::{Config as SessionConfig, Pallet as SessionModule, SessionIndex}, - Config as HistoricalConfig, -}; - -use super::shared; use sp_std::prelude::*; +use super::{shared, Config as HistoricalConfig}; +use crate::{Config as SessionConfig, Pallet as SessionModule, SessionIndex}; + /// Store the validator-set associated to the `session_index` to the off-chain database. /// /// Further processing is then done [`off-chain side`](super::offchain). diff --git a/frame/session/src/historical/shared.rs b/frame/session/src/historical/shared.rs index e801aa80eef4c..182e9ecacee19 100644 --- a/frame/session/src/historical/shared.rs +++ b/frame/session/src/historical/shared.rs @@ -18,8 +18,8 @@ //! 
Shared logic between on-chain and off-chain components used for slashing using an off-chain //! worker. -use super::SessionIndex; use codec::Encode; +use sp_staking::SessionIndex; use sp_std::prelude::*; pub(super) const PREFIX: &[u8] = b"session_historical"; diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index e57decec8c651..7fe163e0dfeac 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -15,14 +15,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! # Session Module +//! # Session Pallet //! -//! The Session module allows validators to manage their session keys, provides a function for +//! The Session pallet allows validators to manage their session keys, provides a function for //! changing the session length, and handles session rotation. //! //! - [`Config`] //! - [`Call`] -//! - [`Module`] +//! - [`Pallet`] //! //! ## Overview //! @@ -95,12 +95,12 @@ //! use pallet_session as session; //! //! fn validators() -> Vec<::ValidatorId> { -//! >::validators() +//! >::validators() //! } //! # fn main(){} //! ``` //! -//! ## Related Modules +//! ## Related Pallets //! //! 
- [Staking](../pallet_staking/index.html) @@ -114,29 +114,30 @@ mod mock; mod tests; pub mod weights; -use codec::{Decode, MaxEncodedLen}; use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, - dispatch::{self, DispatchError, DispatchResult}, + codec::{Decode, MaxEncodedLen}, + dispatch::{DispatchError, DispatchResult}, ensure, traits::{ EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor, Get, OneSessionHandler, - ValidatorRegistration, ValidatorSet, + StorageVersion, ValidatorRegistration, ValidatorSet, }, weights::Weight, - ConsensusEngineId, Parameter, + Parameter, }; -use frame_system::ensure_signed; use sp_runtime::{ traits::{AtLeast32BitUnsigned, Convert, Member, One, OpaqueKeys, Zero}, - KeyTypeId, Perbill, Permill, RuntimeAppPublic, + ConsensusEngineId, KeyTypeId, Permill, RuntimeAppPublic, }; use sp_staking::SessionIndex; use sp_std::{ + convert::TryFrom, marker::PhantomData, ops::{Rem, Sub}, prelude::*, }; + +pub use pallet::*; pub use weights::WeightInfo; /// Decides whether the session should be ended. @@ -228,7 +229,7 @@ pub trait SessionManager { /// /// Even if the validator-set is the same as before, if any underlying economic conditions have /// changed (i.e. stake-weights), the new validator set must be returned. This is necessary for - /// consensus engines making use of the session module to issue a validator-set change so + /// consensus engines making use of the session pallet to issue a validator-set change so /// misbehavior can be provably associated with the new economic conditions as opposed to the /// old. The returned validator set, if any, will not be applied until `new_index`. `new_index` /// is strictly greater than from previous call. @@ -280,7 +281,7 @@ pub trait SessionHandler { fn on_genesis_session(validators: &[(ValidatorId, Ks)]); /// Session set has changed; act appropriately. Note that this can be called - /// before initialization of your module. + /// before initialization of your pallet. 
/// /// `changed` is true whenever any of the session keys or underlying economic /// identities or weightings behind those keys has changed. @@ -297,7 +298,7 @@ pub trait SessionHandler { fn on_before_session_ending() {} /// A validator got disabled. Act accordingly until a new session begins. - fn on_disabled(validator_index: usize); + fn on_disabled(validator_index: u32); } #[impl_trait_for_tuples::impl_for_tuples(1, 30)] @@ -341,7 +342,7 @@ impl SessionHandler for Tuple { for_tuples!( #( Tuple::on_before_session_ending(); )* ) } - fn on_disabled(i: usize) { + fn on_disabled(i: u32) { for_tuples!( #( Tuple::on_disabled(i); )* ) } } @@ -353,89 +354,84 @@ impl SessionHandler for TestSessionHandler { fn on_genesis_session(_: &[(AId, Ks)]) {} fn on_new_session(_: bool, _: &[(AId, Ks)], _: &[(AId, Ks)]) {} fn on_before_session_ending() {} - fn on_disabled(_: usize) {} + fn on_disabled(_: u32) {} } -impl ValidatorRegistration for Module { - fn is_registered(id: &T::ValidatorId) -> bool { - Self::load_keys(id).is_some() - } -} - -pub trait Config: frame_system::Config { - /// The overarching event type. - type Event: From + Into<::Event>; - - /// A stable ID for a validator. - type ValidatorId: Member + Parameter + MaxEncodedLen; - - /// A conversion from account ID to validator ID. - /// - /// Its cost must be at most one storage read. - type ValidatorIdOf: Convert>; - - /// Indicator for when to end the session. - type ShouldEndSession: ShouldEndSession; - - /// Something that can predict the next session rotation. This should typically come from the - /// same logical unit that provides [`ShouldEndSession`], yet, it gives a best effort estimate. - /// It is helpful to implement [`EstimateNextNewSession`]. - type NextSessionRotation: EstimateNextSessionRotation; - - /// Handler for managing new session. - type SessionManager: SessionManager; - - /// Handler when a session has changed. - type SessionHandler: SessionHandler; - - /// The keys. 
- type Keys: OpaqueKeys + Member + Parameter + Default; - - /// The fraction of validators set that is safe to be disabled. - /// - /// After the threshold is reached `disabled` method starts to return true, - /// which in combination with `pallet_staking` forces a new era. - type DisabledValidatorsThreshold: Get; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + /// The current storage version. + const STORAGE_VERSION: StorageVersion = StorageVersion::new(0); + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + #[pallet::storage_version(STORAGE_VERSION)] + pub struct Pallet(_); + + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching event type. + type Event: From + IsType<::Event>; + + /// A stable ID for a validator. + type ValidatorId: Member + + Parameter + + MaybeSerializeDeserialize + + MaxEncodedLen + + TryFrom; + + /// A conversion from account ID to validator ID. + /// + /// Its cost must be at most one storage read. + type ValidatorIdOf: Convert>; - /// Weight information for extrinsics in this pallet. - type WeightInfo: WeightInfo; -} + /// Indicator for when to end the session. + type ShouldEndSession: ShouldEndSession; -decl_storage! { - trait Store for Module as Session { - /// The current set of validators. - Validators get(fn validators): Vec; + /// Something that can predict the next session rotation. This should typically come from + /// the same logical unit that provides [`ShouldEndSession`], yet, it gives a best effort + /// estimate. It is helpful to implement [`EstimateNextNewSession`]. + type NextSessionRotation: EstimateNextSessionRotation; - /// Current index of the session. - CurrentIndex get(fn current_index): SessionIndex; + /// Handler for managing new session. 
+ type SessionManager: SessionManager; - /// True if the underlying economic identities or weighting behind the validators - /// has changed in the queued validator set. - QueuedChanged: bool; + /// Handler when a session has changed. + type SessionHandler: SessionHandler; - /// The queued keys for the next session. When the next session begins, these keys - /// will be used to determine the validator's session keys. - QueuedKeys get(fn queued_keys): Vec<(T::ValidatorId, T::Keys)>; + /// The keys. + type Keys: OpaqueKeys + Member + Parameter + Default + MaybeSerializeDeserialize; - /// Indices of disabled validators. - /// - /// The set is cleared when `on_session_ending` returns a new set of identities. - DisabledValidators get(fn disabled_validators): Vec; + /// Weight information for extrinsics in this pallet. + type WeightInfo: WeightInfo; + } - /// The next session keys for a validator. - NextKeys: map hasher(twox_64_concat) T::ValidatorId => Option; + #[pallet::genesis_config] + pub struct GenesisConfig { + pub keys: Vec<(T::AccountId, T::ValidatorId, T::Keys)>, + } - /// The owner of a key. The key is the `KeyTypeId` + the encoded key. - KeyOwner: map hasher(twox_64_concat) (KeyTypeId, Vec) => Option; + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { keys: Default::default() } + } } - add_extra_genesis { - config(keys): Vec<(T::AccountId, T::ValidatorId, T::Keys)>; - build(|config: &GenesisConfig| { + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { if T::SessionHandler::KEY_TYPE_IDS.len() != T::Keys::key_ids().len() { panic!("Number of keys in session handler and session keys does not match"); } - T::SessionHandler::KEY_TYPE_IDS.iter().zip(T::Keys::key_ids()).enumerate() + T::SessionHandler::KEY_TYPE_IDS + .iter() + .zip(T::Keys::key_ids()) + .enumerate() .for_each(|(i, (sk, kk))| { if sk != kk { panic!( @@ -445,8 +441,8 @@ decl_storage! 
{ } }); - for (account, val, keys) in config.keys.iter().cloned() { - >::inner_set_keys(&val, keys) + for (account, val, keys) in self.keys.iter().cloned() { + >::inner_set_keys(&val, keys) .expect("genesis config must not contain duplicates; qed"); if frame_system::Pallet::::inc_consumers(&account).is_err() { // This will leak a provider reference, however it only happens once (at @@ -457,25 +453,30 @@ decl_storage! { } } - let initial_validators_0 = T::SessionManager::new_session_genesis(0) - .unwrap_or_else(|| { - frame_support::print("No initial validator provided by `SessionManager`, use \ - session config keys to generate initial validator set."); - config.keys.iter().map(|x| x.1.clone()).collect() + let initial_validators_0 = + T::SessionManager::new_session_genesis(0).unwrap_or_else(|| { + frame_support::print( + "No initial validator provided by `SessionManager`, use \ + session config keys to generate initial validator set.", + ); + self.keys.iter().map(|x| x.1.clone()).collect() }); - assert!(!initial_validators_0.is_empty(), "Empty validator set for session 0 in genesis block!"); + assert!( + !initial_validators_0.is_empty(), + "Empty validator set for session 0 in genesis block!" + ); let initial_validators_1 = T::SessionManager::new_session_genesis(1) .unwrap_or_else(|| initial_validators_0.clone()); - assert!(!initial_validators_1.is_empty(), "Empty validator set for session 1 in genesis block!"); + assert!( + !initial_validators_1.is_empty(), + "Empty validator set for session 1 in genesis block!" + ); let queued_keys: Vec<_> = initial_validators_1 .iter() .cloned() - .map(|v| ( - v.clone(), - >::load_keys(&v).unwrap_or_default(), - )) + .map(|v| (v.clone(), >::load_keys(&v).unwrap_or_default())) .collect(); // Tell everyone about the genesis session keys @@ -485,21 +486,64 @@ decl_storage! { >::put(queued_keys); T::SessionManager::start_session(0); - }); + } } -} -decl_event!( + /// The current set of validators. 
+ #[pallet::storage] + #[pallet::getter(fn validators)] + pub type Validators = StorageValue<_, Vec, ValueQuery>; + + /// Current index of the session. + #[pallet::storage] + #[pallet::getter(fn current_index)] + pub type CurrentIndex = StorageValue<_, SessionIndex, ValueQuery>; + + /// True if the underlying economic identities or weighting behind the validators + /// has changed in the queued validator set. + #[pallet::storage] + pub type QueuedChanged = StorageValue<_, bool, ValueQuery>; + + /// The queued keys for the next session. When the next session begins, these keys + /// will be used to determine the validator's session keys. + #[pallet::storage] + #[pallet::getter(fn queued_keys)] + pub type QueuedKeys = StorageValue<_, Vec<(T::ValidatorId, T::Keys)>, ValueQuery>; + + /// Indices of disabled validators. + /// + /// The vec is always kept sorted so that we can find whether a given validator is + /// disabled using binary search. It gets cleared when `on_session_ending` returns + /// a new set of identities. + #[pallet::storage] + #[pallet::getter(fn disabled_validators)] + pub type DisabledValidators = StorageValue<_, Vec, ValueQuery>; + + /// The next session keys for a validator. + #[pallet::storage] + pub type NextKeys = + StorageMap<_, Twox64Concat, T::ValidatorId, T::Keys, OptionQuery>; + + /// The owner of a key. The key is the `KeyTypeId` + the encoded key. + #[pallet::storage] + pub type KeyOwner = + StorageMap<_, Twox64Concat, (KeyTypeId, Vec), T::ValidatorId, OptionQuery>; + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] pub enum Event { /// New session has happened. Note that the argument is the \[session_index\], not the /// block number as the type might suggest. NewSession(SessionIndex), } -); -decl_error! { - /// Error for the session module. - pub enum Error for Module { + /// Old name generated by `decl_event`. 
+ #[deprecated(note = "use `Event` instead")] + pub type RawEvent = Event; + + /// Error for the session pallet. + #[pallet::error] + pub enum Error { /// Invalid ownership proof. InvalidProof, /// No associated validator ID for account. @@ -511,14 +555,26 @@ decl_error! { /// Key setting account is not live, so it's impossible to associate keys. NoAccount, } -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - fn deposit_event() = default; + #[pallet::hooks] + impl Hooks> for Pallet { + /// Called when a block is initialized. Will rotate session if it is the last + /// block of the current session. + fn on_initialize(n: T::BlockNumber) -> Weight { + if T::ShouldEndSession::should_end_session(n) { + Self::rotate_session(); + T::BlockWeights::get().max_block + } else { + // NOTE: the non-database part of the weight for `should_end_session(n)` is + // included as weight for empty block, the database part is expected to be in + // cache. + 0 + } + } + } + #[pallet::call] + impl Pallet { /// Sets the session key(s) of the function caller to `keys`. /// Allows an account to set its session key prior to becoming a validator. /// This doesn't take effect until the next session. @@ -526,67 +582,56 @@ decl_module! { /// The dispatch origin of this function must be signed. /// /// # - /// - Complexity: `O(1)` - /// Actual cost depends on the number of length of `T::Keys::key_ids()` which is fixed. + /// - Complexity: `O(1)`. Actual cost depends on the number of length of + /// `T::Keys::key_ids()` which is fixed. 
/// - DbReads: `origin account`, `T::ValidatorIdOf`, `NextKeys` /// - DbWrites: `origin account`, `NextKeys` /// - DbReads per key id: `KeyOwner` /// - DbWrites per key id: `KeyOwner` /// # - #[weight = T::WeightInfo::set_keys()] - pub fn set_keys(origin, keys: T::Keys, proof: Vec) -> dispatch::DispatchResult { + #[pallet::weight(T::WeightInfo::set_keys())] + pub fn set_keys(origin: OriginFor, keys: T::Keys, proof: Vec) -> DispatchResult { let who = ensure_signed(origin)?; - ensure!(keys.ownership_proof_is_valid(&proof), Error::::InvalidProof); Self::do_set_keys(&who, keys)?; - Ok(()) } /// Removes any session key(s) of the function caller. + /// /// This doesn't take effect until the next session. /// - /// The dispatch origin of this function must be signed. + /// The dispatch origin of this function must be Signed and the account must be either be + /// convertible to a validator ID using the chain's typical addressing system (this usually + /// means being a controller account) or directly convertible into a validator ID (which + /// usually means being a stash account). /// /// # - /// - Complexity: `O(1)` in number of key types. - /// Actual cost depends on the number of length of `T::Keys::key_ids()` which is fixed. + /// - Complexity: `O(1)` in number of key types. Actual cost depends on the number of length + /// of `T::Keys::key_ids()` which is fixed. /// - DbReads: `T::ValidatorIdOf`, `NextKeys`, `origin account` /// - DbWrites: `NextKeys`, `origin account` /// - DbWrites per key id: `KeyOwner` /// # - #[weight = T::WeightInfo::purge_keys()] - pub fn purge_keys(origin) { + #[pallet::weight(T::WeightInfo::purge_keys())] + pub fn purge_keys(origin: OriginFor) -> DispatchResult { let who = ensure_signed(origin)?; Self::do_purge_keys(&who)?; - } - - /// Called when a block is initialized. Will rotate session if it is the last - /// block of the current session. 
- fn on_initialize(n: T::BlockNumber) -> Weight { - if T::ShouldEndSession::should_end_session(n) { - Self::rotate_session(); - T::BlockWeights::get().max_block - } else { - // NOTE: the non-database part of the weight for `should_end_session(n)` is - // included as weight for empty block, the database part is expected to be in - // cache. - 0 - } + Ok(()) } } } -impl Module { +impl Pallet { /// Move on to next session. Register new validator set and session keys. Changes to the /// validator set have a session of delay to take effect. This allows for equivocation /// punishment after a fork. pub fn rotate_session() { - let session_index = CurrentIndex::get(); + let session_index = >::get(); log::trace!(target: "runtime::session", "rotating session {:?}", session_index); - let changed = QueuedChanged::get(); + let changed = >::get(); // Inform the session handlers that a session is going to end. T::SessionHandler::on_before_session_ending(); @@ -600,12 +645,12 @@ impl Module { if changed { // reset disabled validators - DisabledValidators::take(); + >::take(); } // Increment session index. let session_index = session_index + 1; - CurrentIndex::put(session_index); + >::put(session_index); T::SessionManager::start_session(session_index); @@ -655,7 +700,7 @@ impl Module { }; >::put(queued_amalgamated.clone()); - QueuedChanged::put(next_changed); + >::put(next_changed); // Record that this happened. Self::deposit_event(Event::NewSession(session_index)); @@ -664,42 +709,34 @@ impl Module { T::SessionHandler::on_new_session::(changed, &session_keys, &queued_amalgamated); } - /// Disable the validator of index `i`. - /// - /// Returns `true` if this causes a `DisabledValidatorsThreshold` of validators - /// to be already disabled. - pub fn disable_index(i: usize) -> bool { - let (fire_event, threshold_reached) = DisabledValidators::mutate(|disabled| { - let i = i as u32; + /// Disable the validator of index `i`, returns `false` if the validator was already disabled. 
+ pub fn disable_index(i: u32) -> bool { + if i >= Validators::::decode_len().unwrap_or(0) as u32 { + return false + } + + >::mutate(|disabled| { if let Err(index) = disabled.binary_search(&i) { - let count = >::decode_len().unwrap_or(0) as u32; - let threshold = T::DisabledValidatorsThreshold::get() * count; disabled.insert(index, i); - (true, disabled.len() as u32 > threshold) - } else { - (false, false) + T::SessionHandler::on_disabled(i); + return true } - }); - - if fire_event { - T::SessionHandler::on_disabled(i); - } - threshold_reached + false + }) } - /// Disable the validator identified by `c`. (If using with the staking module, + /// Disable the validator identified by `c`. (If using with the staking pallet, /// this would be their *stash* account.) /// - /// Returns `Ok(true)` if more than `DisabledValidatorsThreshold` validators in current - /// session is already disabled. - /// If used with the staking module it allows to force a new era in such case. - pub fn disable(c: &T::ValidatorId) -> sp_std::result::Result { + /// Returns `false` either if the validator could not be found or it was already + /// disabled. + pub fn disable(c: &T::ValidatorId) -> bool { Self::validators() .iter() .position(|i| i == c) - .map(Self::disable_index) - .ok_or(()) + .map(|i| Self::disable_index(i as u32)) + .unwrap_or(false) } /// Upgrade the key type from some old type to a new type. Supports adding @@ -711,7 +748,7 @@ impl Module { /// /// Care should be taken that the raw versions of the /// added keys are unique for every `ValidatorId, KeyTypeId` combination. - /// This is an invariant that the session module typically maintains internally. + /// This is an invariant that the session pallet typically maintains internally. 
/// /// As the actual values of the keys are typically not known at runtime upgrade, /// it's recommended to initialize the keys to a (unique) dummy value with the expectation @@ -756,7 +793,7 @@ impl Module { /// /// This ensures that the reference counter in system is incremented appropriately and as such /// must accept an account ID, rather than a validator ID. - fn do_set_keys(account: &T::AccountId, keys: T::Keys) -> dispatch::DispatchResult { + fn do_set_keys(account: &T::AccountId, keys: T::Keys) -> DispatchResult { let who = T::ValidatorIdOf::convert(account.clone()) .ok_or(Error::::NoAssociatedValidatorId)?; @@ -812,6 +849,10 @@ impl Module { fn do_purge_keys(account: &T::AccountId) -> DispatchResult { let who = T::ValidatorIdOf::convert(account.clone()) + // `purge_keys` may not have a controller-stash pair any more. If so then we expect the + // stash account to be passed in directly and convert that to a `ValidatorId` using the + // `TryFrom` trait if supported. + .or_else(|| T::ValidatorId::try_from(account.clone()).ok()) .ok_or(Error::::NoAssociatedValidatorId)?; let old_keys = Self::take_keys(&who).ok_or(Error::::NoKeys)?; @@ -850,16 +891,40 @@ impl Module { } } -impl ValidatorSet for Module { +impl ValidatorRegistration for Pallet { + fn is_registered(id: &T::ValidatorId) -> bool { + Self::load_keys(id).is_some() + } +} + +impl ValidatorSet for Pallet { type ValidatorId = T::ValidatorId; type ValidatorIdOf = T::ValidatorIdOf; fn session_index() -> sp_staking::SessionIndex { - Module::::current_index() + Pallet::::current_index() } fn validators() -> Vec { - Module::::validators() + Pallet::::validators() + } +} + +impl EstimateNextNewSession for Pallet { + fn average_session_length() -> T::BlockNumber { + T::NextSessionRotation::average_session_length() + } + + /// This session pallet always calls new_session and next_session at the same time, hence we + /// do a simple proxy and pass the function to next rotation. 
+ fn estimate_next_new_session(now: T::BlockNumber) -> (Option, Weight) { + T::NextSessionRotation::estimate_next_session_rotation(now) + } +} + +impl frame_support::traits::DisabledValidators for Pallet { + fn is_disabled(index: u32) -> bool { + >::disabled_validators().binary_search(&index).is_ok() } } @@ -877,25 +942,7 @@ impl> FindAuthor { let i = Inner::find_author(digests)?; - let validators = >::validators(); + let validators = >::validators(); validators.get(i as usize).map(|k| k.clone()) } } - -impl EstimateNextNewSession for Module { - fn average_session_length() -> T::BlockNumber { - T::NextSessionRotation::average_session_length() - } - - /// This session module always calls new_session and next_session at the same time, hence we - /// do a simple proxy and pass the function to next rotation. - fn estimate_next_new_session(now: T::BlockNumber) -> (Option, Weight) { - T::NextSessionRotation::estimate_next_session_rotation(now) - } -} - -impl frame_support::traits::DisabledValidators for Module { - fn is_disabled(index: u32) -> bool { - >::disabled_validators().binary_search(&index).is_ok() - } -} diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index 449acaff5305d..6db7727fa5391 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -21,16 +21,18 @@ use super::*; use crate as pallet_session; #[cfg(feature = "historical")] use crate::historical as pallet_session_historical; -use frame_support::{parameter_types, BasicExternalities}; + +use std::{cell::RefCell, collections::BTreeMap}; + use sp_core::{crypto::key_types::DUMMY, H256}; use sp_runtime::{ impl_opaque_keys, testing::{Header, UintAuthorityId}, - traits::{BlakeTwo256, ConvertInto, IdentityLookup}, - Perbill, + traits::{BlakeTwo256, IdentityLookup}, }; use sp_staking::SessionIndex; -use std::cell::RefCell; + +use frame_support::{parameter_types, traits::GenesisBuild, BasicExternalities}; impl_opaque_keys! 
{ pub struct MockSessionKeys { @@ -109,6 +111,7 @@ thread_local! { pub static DISABLED: RefCell = RefCell::new(false); // Stores if `on_before_session_end` was called pub static BEFORE_SESSION_END_CALLED: RefCell = RefCell::new(false); + pub static VALIDATOR_ACCOUNTS: RefCell> = RefCell::new(BTreeMap::new()); } pub struct TestShouldEndSession; @@ -141,7 +144,7 @@ impl SessionHandler for TestSessionHandler { .collect() }); } - fn on_disabled(_validator_index: usize) { + fn on_disabled(_validator_index: u32) { DISABLED.with(|l| *l.borrow_mut() = true) } fn on_before_session_ending() { @@ -223,6 +226,10 @@ pub fn new_test_ext() -> sp_io::TestExternalities { pallet_session::GenesisConfig:: { keys } .assimilate_storage(&mut t) .unwrap(); + NEXT_VALIDATORS.with(|l| { + let v = l.borrow().iter().map(|&i| (i, i)).collect(); + VALIDATOR_ACCOUNTS.with(|m| *m.borrow_mut() = v); + }); sp_io::TestExternalities::new(t) } @@ -266,8 +273,16 @@ impl pallet_timestamp::Config for Test { type WeightInfo = (); } -parameter_types! 
{ - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); +pub struct TestValidatorIdOf; +impl TestValidatorIdOf { + pub fn set(v: BTreeMap) { + VALIDATOR_ACCOUNTS.with(|m| *m.borrow_mut() = v); + } +} +impl Convert> for TestValidatorIdOf { + fn convert(x: u64) -> Option { + VALIDATOR_ACCOUNTS.with(|m| m.borrow().get(&x).cloned()) + } } impl Config for Test { @@ -278,10 +293,9 @@ impl Config for Test { type SessionManager = TestSessionManager; type SessionHandler = TestSessionHandler; type ValidatorId = u64; - type ValidatorIdOf = ConvertInto; + type ValidatorIdOf = TestValidatorIdOf; type Keys = MockSessionKeys; type Event = Event; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type NextSessionRotation = (); type WeightInfo = (); } diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs index 23e1c6a993427..308ed7c5e5487 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -18,17 +18,19 @@ // Tests for the Session Pallet use super::*; -use codec::Decode; -use frame_support::{assert_noop, assert_ok, traits::OnInitialize}; -use mock::{ +use crate::mock::{ authorities, before_session_end_called, force_new_session, new_test_ext, reset_before_session_end_called, session_changed, set_next_validators, set_session_length, - Origin, PreUpgradeMockSessionKeys, Session, System, Test, SESSION_CHANGED, + Origin, PreUpgradeMockSessionKeys, Session, System, Test, TestValidatorIdOf, SESSION_CHANGED, TEST_SESSION_CHANGED, }; + +use codec::Decode; use sp_core::crypto::key_types::DUMMY; use sp_runtime::testing::UintAuthorityId; +use frame_support::{assert_noop, assert_ok, traits::OnInitialize}; + fn initialize_block(block: u64) { SESSION_CHANGED.with(|l| *l.borrow_mut() = false); System::set_block_number(block); @@ -70,11 +72,35 @@ fn keys_cleared_on_kill() { }) } +#[test] +fn purge_keys_works_for_stash_id() { + let mut ext = new_test_ext(); + ext.execute_with(|| { + assert_eq!(Session::validators(), 
vec![1, 2, 3]); + TestValidatorIdOf::set(vec![(10, 1), (20, 2), (3, 3)].into_iter().collect()); + assert_eq!(Session::load_keys(&1), Some(UintAuthorityId(1).into())); + assert_eq!(Session::load_keys(&2), Some(UintAuthorityId(2).into())); + + let id = DUMMY; + assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1)); + + assert_ok!(Session::purge_keys(Origin::signed(10))); + assert_ok!(Session::purge_keys(Origin::signed(2))); + + assert_eq!(Session::load_keys(&10), None); + assert_eq!(Session::load_keys(&20), None); + assert_eq!(Session::key_owner(id, UintAuthorityId(10).get_raw(id)), None); + assert_eq!(Session::key_owner(id, UintAuthorityId(20).get_raw(id)), None); + }) +} + #[test] fn authorities_should_track_validators() { reset_before_session_end_called(); new_test_ext().execute_with(|| { + TestValidatorIdOf::set(vec![(1, 1), (2, 2), (3, 3), (4, 4)].into_iter().collect()); + set_next_validators(vec![1, 2]); force_new_session(); initialize_block(1); @@ -185,6 +211,8 @@ fn session_change_should_work() { #[test] fn duplicates_are_not_allowed() { new_test_ext().execute_with(|| { + TestValidatorIdOf::set(vec![(1, 1), (2, 2), (3, 3), (4, 4)].into_iter().collect()); + System::set_block_number(1); Session::on_initialize(1); assert_noop!( @@ -203,6 +231,7 @@ fn session_changed_flag_works() { reset_before_session_end_called(); new_test_ext().execute_with(|| { + TestValidatorIdOf::set(vec![(1, 1), (2, 2), (3, 3), (69, 69)].into_iter().collect()); TEST_SESSION_CHANGED.with(|l| *l.borrow_mut() = true); force_new_session(); @@ -336,7 +365,7 @@ fn session_keys_generate_output_works_as_set_keys_input() { } #[test] -fn return_true_if_more_than_third_is_disabled() { +fn disable_index_returns_false_if_already_disabled() { new_test_ext().execute_with(|| { set_next_validators(vec![1, 2, 3, 4, 5, 6, 7]); force_new_session(); @@ -345,10 +374,9 @@ fn return_true_if_more_than_third_is_disabled() { force_new_session(); initialize_block(2); + 
assert_eq!(Session::disable_index(0), true); assert_eq!(Session::disable_index(0), false); - assert_eq!(Session::disable_index(1), false); - assert_eq!(Session::disable_index(2), true); - assert_eq!(Session::disable_index(3), true); + assert_eq!(Session::disable_index(1), true); }); } diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index 942b2844195f2..ab2c379c51b5c 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME society pallet" readme = "README.md" diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index 70637bcd7726f..4b608bd91dc76 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet staking" readme = "README.md" diff --git a/frame/staking/fuzzer/src/mock.rs b/frame/staking/fuzzer/src/mock.rs index 921e0d3b48d7d..d5ca78193b0c0 100644 --- a/frame/staking/fuzzer/src/mock.rs +++ b/frame/staking/fuzzer/src/mock.rs @@ -119,7 +119,7 @@ impl pallet_session::SessionHandler for TestSessionHandler { _: &[(AccountId, Ks)], ) {} - fn on_disabled(_: usize) {} + fn on_disabled(_: u32) {} } impl pallet_session::Config for Test { diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index 4cbc2473cb526..e44188bf7894e 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" 
+homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Reward Curve for FRAME staking pallet" @@ -15,8 +15,8 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.58", features = ["full", "visit"] } -quote = "1.0.3" +syn = { version = "1.0.80", features = ["full", "visit"] } +quote = "1.0.10" proc-macro2 = "1.0.29" proc-macro-crate = "1.0.0" diff --git a/frame/staking/reward-fn/Cargo.toml b/frame/staking/reward-fn/Cargo.toml index 076e05bf2a61e..ae0b7f50c994c 100644 --- a/frame/staking/reward-fn/Cargo.toml +++ b/frame/staking/reward-fn/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Reward function for FRAME staking pallet" diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index f3def7206320c..220e8f1e6a24c 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -23,6 +23,7 @@ use testing_utils::*; use frame_election_provider_support::SortedListProvider; use frame_support::{ + dispatch::UnfilteredDispatchable, pallet_prelude::*, traits::{Currency, CurrencyToVote, Get, Imbalance}, }; @@ -764,9 +765,15 @@ benchmarks! 
{ >::insert(current_era, total_payout); let caller: T::AccountId = whitelisted_caller(); + let origin = RawOrigin::Signed(caller); + let calls: Vec<_> = payout_calls_arg.iter().map(|arg| + Call::::payout_stakers { validator_stash: arg.0.clone(), era: arg.1 }.encode() + ).collect(); }: { - for arg in payout_calls_arg { - >::payout_stakers(RawOrigin::Signed(caller.clone()).into(), arg.0, arg.1)?; + for call in calls { + as Decode>::decode(&mut &*call) + .expect("call is encoded above, encoding must be correct") + .dispatch_bypass_filter(origin.clone().into())?; } } @@ -880,6 +887,13 @@ benchmarks! { verify { assert!(!T::SortedListProvider::contains(&stash)); } + + impl_benchmark_test_suite!( + Staking, + crate::mock::ExtBuilder::default().has_stakers(true), + crate::mock::Test, + exec_name = build_and_execute + ); } #[cfg(test)] @@ -994,10 +1008,3 @@ mod tests { }); } } - -impl_benchmark_test_suite!( - Staking, - crate::mock::ExtBuilder::default().has_stakers(true), - crate::mock::Test, - exec_name = build_and_execute -); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 136515a5d6168..be02e8d91d326 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -272,6 +272,7 @@ //! validators is stored in the Session pallet's `Validators` at the end of each era. #![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] #[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; @@ -478,7 +479,9 @@ impl } /// Re-bond funds that were scheduled for unlocking. - fn rebond(mut self, value: Balance) -> Self { + /// + /// Returns the updated ledger, and the amount actually rebonded. 
+ fn rebond(mut self, value: Balance) -> (Self, Balance) { let mut unlocking_balance: Balance = Zero::zero(); while let Some(last) = self.unlocking.last_mut() { @@ -499,7 +502,7 @@ impl } } - self + (self, unlocking_balance) } } @@ -616,12 +619,9 @@ pub struct UnappliedSlash { /// /// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Config` pub trait SessionInterface: frame_system::Config { - /// Disable a given validator by stash ID. - /// - /// Returns `true` if new era should be forced at the end of this session. - /// This allows preventing a situation where there is too many validators - /// disabled and block production stalls. - fn disable_validator(validator: &AccountId) -> Result; + /// Disable the validator at the given index, returns `false` if the validator was already + /// disabled or the index is out of bounds. + fn disable_validator(validator_index: u32) -> bool; /// Get the validators from session. fn validators() -> Vec; /// Prune historical session tries up to but not including the given index. 
@@ -642,8 +642,8 @@ where Option<::AccountId>, >, { - fn disable_validator(validator: &::AccountId) -> Result { - >::disable(validator) + fn disable_validator(validator_index: u32) -> bool { + >::disable_index(validator_index) } fn validators() -> Vec<::AccountId> { diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index b3ce8e063cb61..95d397359f8d6 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -22,8 +22,7 @@ use frame_election_provider_support::{onchain, SortedListProvider}; use frame_support::{ assert_ok, parameter_types, traits::{ - Currency, FindAuthor, GenesisBuild, Get, Hooks, Imbalance, OnInitialize, OnUnbalanced, - OneSessionHandler, + Currency, FindAuthor, GenesisBuild, Get, Hooks, Imbalance, OnUnbalanced, OneSessionHandler, }, weights::constants::RocksDbWeight, }; @@ -35,7 +34,7 @@ use sp_runtime::{ traits::{IdentityLookup, Zero}, }; use sp_staking::offence::{OffenceDetails, OnOffenceHandler}; -use std::{cell::RefCell, collections::HashSet}; +use std::cell::RefCell; pub const INIT_TIMESTAMP: u64 = 30_000; pub const BLOCK_TIME: u64 = 1000; @@ -46,10 +45,6 @@ pub(crate) type AccountIndex = u64; pub(crate) type BlockNumber = u64; pub(crate) type Balance = u128; -thread_local! { - static SESSION: RefCell<(Vec, HashSet)> = RefCell::new(Default::default()); -} - /// Another session handler struct to test on_disabled. 
pub struct OtherSessionHandler; impl OneSessionHandler for OtherSessionHandler { @@ -62,23 +57,14 @@ impl OneSessionHandler for OtherSessionHandler { { } - fn on_new_session<'a, I: 'a>(_: bool, validators: I, _: I) + fn on_new_session<'a, I: 'a>(_: bool, _: I, _: I) where I: Iterator, AccountId: 'a, { - SESSION.with(|x| { - *x.borrow_mut() = (validators.map(|x| x.0.clone()).collect(), HashSet::new()) - }); } - fn on_disabled(validator_index: usize) { - SESSION.with(|d| { - let mut d = d.borrow_mut(); - let value = d.0[validator_index]; - d.1.insert(value); - }) - } + fn on_disabled(_validator_index: u32) {} } impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { @@ -87,7 +73,12 @@ impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { pub fn is_disabled(controller: AccountId) -> bool { let stash = Staking::ledger(&controller).unwrap().stash; - SESSION.with(|d| d.borrow().1.contains(&stash)) + let validator_index = match Session::validators().iter().position(|v| *v == stash) { + Some(index) => index as u32, + None => return false, + }; + + Session::disabled_validators().contains(&validator_index) } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -172,7 +163,6 @@ impl pallet_balances::Config for Test { } parameter_types! { pub const UncleGenerations: u64 = 0; - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(25); } sp_runtime::impl_opaque_keys! { pub struct SessionKeys { @@ -187,7 +177,6 @@ impl pallet_session::Config for Test { type Event = Event; type ValidatorId = AccountId; type ValidatorIdOf = crate::StashOf; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; type NextSessionRotation = pallet_session::PeriodicSessions; type WeightInfo = (); } @@ -225,6 +214,7 @@ parameter_types! 
{ pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &I_NPOS; pub const MaxNominatorRewardedPerValidator: u32 = 64; + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(75); } thread_local! { @@ -278,6 +268,7 @@ impl crate::pallet::pallet::Config for Test { type EraPayout = ConvertCurve; type NextNewSession = Session; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; type ElectionProvider = onchain::OnChainSequentialPhragmen; type GenesisElectionProvider = Self::ElectionProvider; type WeightInfo = (); @@ -511,10 +502,6 @@ impl ExtBuilder { .assimilate_storage(&mut storage); let mut ext = sp_io::TestExternalities::from(storage); - ext.execute_with(|| { - let validators = Session::validators(); - SESSION.with(|x| *x.borrow_mut() = (validators.clone(), HashSet::new())); - }); if self.initialize_first_session { // We consider all test to start after timestamp is initialized This must be ensured by diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index 3ae520872f278..02099d8543d4c 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -302,6 +302,13 @@ impl Pallet { Self::start_era(start_session); } } + + // disable all offending validators that have been disabled for the whole era + for (index, disabled) in >::get() { + if disabled { + T::SessionInterface::disable_validator(index); + } + } } /// End a session potentially ending an era. @@ -374,6 +381,9 @@ impl Pallet { // Set ending era reward. >::insert(&active_era.index, validator_payout); T::RewardRemainder::on_unbalanced(T::Currency::issue(rest)); + + // Clear offending validators. 
+ >::kill(); } } diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index c71130a3492b1..8e97a90e07544 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -141,6 +141,10 @@ pub mod pallet { #[pallet::constant] type MaxNominatorRewardedPerValidator: Get; + /// The fraction of the validator set that is safe to be offending. + /// After the threshold is reached a new era will be forced. + type OffendingValidatorsThreshold: Get; + /// Something that can provide a sorted list of voters in a somewhat sorted way. The /// original use case for this was designed with [`pallet_bags_list::Pallet`] in mind. If /// the bags-list is not desired, [`impls::UseNominatorsMap`] is likely the desired option. @@ -437,6 +441,19 @@ pub mod pallet { #[pallet::getter(fn current_planned_session)] pub type CurrentPlannedSession = StorageValue<_, SessionIndex, ValueQuery>; + /// Indices of validators that have offended in the active era and whether they are currently + /// disabled. + /// + /// This value should be a superset of disabled validators since not all offences lead to the + /// validator being disabled (if there was no slash). This is needed to track the percentage of + /// validators that have offended in the current era, ensuring a new era is forced if + /// `OffendingValidatorsThreshold` is reached. The vec is always kept sorted so that we can find + /// whether a given validator has previously offended using binary search. It gets cleared when + /// the era ends. + #[pallet::storage] + #[pallet::getter(fn offending_validators)] + pub type OffendingValidators = StorageValue<_, Vec<(u32, bool)>, ValueQuery>; + /// True if network has been upgraded to this version. /// Storage version of the pallet. 
/// @@ -1348,11 +1365,11 @@ pub mod pallet { ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); let initial_unlocking = ledger.unlocking.len() as u32; - let ledger = ledger.rebond(value); + let (ledger, rebonded_value) = ledger.rebond(value); // Last check: the new active amount of ledger must be more than ED. ensure!(ledger.active >= T::Currency::minimum_balance(), Error::::InsufficientBond); - Self::deposit_event(Event::::Bonded(ledger.stash.clone(), value)); + Self::deposit_event(Event::::Bonded(ledger.stash.clone(), rebonded_value)); // NOTE: ledger must be updated prior to calling `Self::weight_of`. Self::update_ledger(&controller, &ledger); diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 15ca85b4d046f..68088d0e0d777 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -56,7 +56,7 @@ use crate::{ use codec::{Decode, Encode}; use frame_support::{ ensure, - traits::{Currency, Imbalance, OnUnbalanced}, + traits::{Currency, Get, Imbalance, OnUnbalanced}, }; use scale_info::TypeInfo; use sp_runtime::{ @@ -278,15 +278,13 @@ pub(crate) fn compute_slash( // not continue in the next election. also end the slashing span. 
spans.end_span(now); >::chill_stash(stash); - - // make sure to disable validator till the end of this session - if T::SessionInterface::disable_validator(stash).unwrap_or(false) { - // force a new era, to select a new validator set - >::ensure_new_era() - } } } + // add the validator to the offenders list and make sure it is disabled for + // the duration of the era + add_offending_validator::(params.stash, true); + let mut nominators_slashed = Vec::new(); reward_payout += slash_nominators::(params, prior_slash_p, &mut nominators_slashed); @@ -316,13 +314,53 @@ fn kick_out_if_recent(params: SlashParams) { if spans.era_span(params.slash_era).map(|s| s.index) == Some(spans.span_index()) { spans.end_span(params.now); >::chill_stash(params.stash); + } + + // add the validator to the offenders list but since there's no slash being + // applied there's no need to disable the validator + add_offending_validator::(params.stash, false); +} + +/// Add the given validator to the offenders list and optionally disable it. +/// If after adding the validator `OffendingValidatorsThreshold` is reached +/// a new era will be forced. 
+fn add_offending_validator(stash: &T::AccountId, disable: bool) { + as Store>::OffendingValidators::mutate(|offending| { + let validators = T::SessionInterface::validators(); + let validator_index = match validators.iter().position(|i| i == stash) { + Some(index) => index, + None => return, + }; - // make sure to disable validator till the end of this session - if T::SessionInterface::disable_validator(params.stash).unwrap_or(false) { - // force a new era, to select a new validator set - >::ensure_new_era() + let validator_index_u32 = validator_index as u32; + + match offending.binary_search_by_key(&validator_index_u32, |(index, _)| *index) { + // this is a new offending validator + Err(index) => { + offending.insert(index, (validator_index_u32, disable)); + + let offending_threshold = + T::OffendingValidatorsThreshold::get() * validators.len() as u32; + + if offending.len() >= offending_threshold as usize { + // force a new era, to select a new validator set + >::ensure_new_era() + } + + if disable { + T::SessionInterface::disable_validator(validator_index_u32); + } + }, + Ok(index) => { + if disable && !offending[index].1 { + // the validator had previously offended without being disabled, + // let's make sure we disable it now + offending[index].1 = true; + T::SessionInterface::disable_validator(validator_index_u32); + } + }, } - } + }); } /// Slash nominators. Accepts general parameters and the prior slash percentage of the validator. 
diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 5e7fe3d6266aa..d6d92d5bd57fc 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -23,7 +23,7 @@ use frame_support::{ assert_noop, assert_ok, dispatch::WithPostDispatchInfo, pallet_prelude::*, - traits::{Currency, Get, OnInitialize, ReservableCurrency}, + traits::{Currency, Get, ReservableCurrency}, weights::{extract_actual_weight, GetDispatchInfo}, }; use mock::*; @@ -1517,6 +1517,65 @@ fn rebond_is_fifo() { }) } +#[test] +fn rebond_emits_right_value_in_event() { + // When a user calls rebond with more than can be rebonded, things succeed, + // and the rebond event emits the actual value rebonded. + ExtBuilder::default().nominate(false).build_and_execute(|| { + // Set payee to controller. avoids confusion + assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); + + // Give account 11 some large free balance greater than total + let _ = Balances::make_free_balance_be(&11, 1000000); + + // confirm that 10 is a normal validator and gets paid at the end of the era. + mock::start_active_era(1); + + // Unbond almost all of the funds in stash. 
+ Staking::unbond(Origin::signed(10), 900).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 100, + unlocking: vec![UnlockChunk { value: 900, era: 1 + 3 }], + claimed_rewards: vec![], + }) + ); + + // Re-bond less than the total + Staking::rebond(Origin::signed(10), 100).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 200, + unlocking: vec![UnlockChunk { value: 800, era: 1 + 3 }], + claimed_rewards: vec![], + }) + ); + // Event emitted should be correct + assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 100)); + + // Re-bond way more than available + Staking::rebond(Origin::signed(10), 100_000).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + // Event emitted should be correct, only 800 + assert_eq!(*staking_events().last().unwrap(), Event::Bonded(11, 800)); + }); +} + #[test] fn reward_to_stake_works() { ExtBuilder::default() @@ -2259,10 +2318,11 @@ fn slash_in_old_span_does_not_deselect() { 1, ); - // not forcing for zero-slash and previous span. - assert_eq!(Staking::force_era(), Forcing::NotForcing); - assert!(>::contains_key(11)); - assert!(Session::validators().contains(&11)); + // the validator doesn't get chilled again + assert!(::Validators::iter().find(|(stash, _)| *stash == 11).is_some()); + + // but we are still forcing a new era + assert_eq!(Staking::force_era(), Forcing::ForceNew); on_offence_in_era( &[OffenceDetails { @@ -2274,10 +2334,13 @@ fn slash_in_old_span_does_not_deselect() { 1, ); - // or non-zero. 
- assert_eq!(Staking::force_era(), Forcing::NotForcing); - assert!(>::contains_key(11)); - assert!(Session::validators().contains(&11)); + // the validator doesn't get chilled again + assert!(::Validators::iter().find(|(stash, _)| *stash == 11).is_some()); + + // but it's disabled + assert!(is_disabled(10)); + // and we are still forcing a new era + assert_eq!(Staking::force_era(), Forcing::ForceNew); }); } @@ -2908,6 +2971,132 @@ fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_valid }); } +#[test] +fn non_slashable_offence_doesnt_disable_validator() { + ExtBuilder::default().build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + + // offence with no slash associated + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + ); + + // offence that slashes 25% of the bond + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); + + // the offence for validator 10 wasn't slashable so it wasn't disabled + assert!(!is_disabled(10)); + // whereas validator 20 gets disabled + assert!(is_disabled(20)); + }); +} + +#[test] +fn offence_threshold_triggers_new_era() { + ExtBuilder::default() + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41]); + + assert_eq!( + ::OffendingValidatorsThreshold::get(), + Perbill::from_percent(75), + ); + + // we have 4 validators and an offending validator threshold of 75%, + // once the third validator commits an offence a new era should be forced + + let exposure_11 = 
Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + let exposure_31 = Staking::eras_stakers(Staking::active_era().unwrap().index, &31); + + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + ); + + assert_eq!(ForceEra::::get(), Forcing::NotForcing); + + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::zero()], + ); + + assert_eq!(ForceEra::::get(), Forcing::NotForcing); + + on_offence_now( + &[OffenceDetails { offender: (31, exposure_31.clone()), reporters: vec![] }], + &[Perbill::zero()], + ); + + assert_eq!(ForceEra::::get(), Forcing::ForceNew); + }); +} + +#[test] +fn disabled_validators_are_kept_disabled_for_whole_era() { + ExtBuilder::default() + .validator_count(4) + .set_status(41, StakerStatus::Validator) + .build_and_execute(|| { + mock::start_active_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21, 31, 41]); + assert_eq!(::SessionsPerEra::get(), 3); + + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + + on_offence_now( + &[OffenceDetails { offender: (11, exposure_11.clone()), reporters: vec![] }], + &[Perbill::zero()], + ); + + on_offence_now( + &[OffenceDetails { offender: (21, exposure_21.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); + + // validator 10 should not be disabled since the offence wasn't slashable + assert!(!is_disabled(10)); + // validator 20 gets disabled since it got slashed + assert!(is_disabled(20)); + + advance_session(); + + // disabled validators should carry-on through all sessions in the era + assert!(!is_disabled(10)); + assert!(is_disabled(20)); + + // validator 10 should now get disabled + on_offence_now( + &[OffenceDetails { offender: (11, 
exposure_11.clone()), reporters: vec![] }], + &[Perbill::from_percent(25)], + ); + + advance_session(); + + // and both are disabled in the last session of the era + assert!(is_disabled(10)); + assert!(is_disabled(20)); + + mock::start_active_era(2); + + // when a new era starts disabled validators get cleared + assert!(!is_disabled(10)); + assert!(!is_disabled(20)); + }); +} + #[test] fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // should check that: diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index baacb66d5c751..3587a234566ec 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for sudo" readme = "README.md" diff --git a/frame/sudo/README.md b/frame/sudo/README.md index ac7de01615f3f..60090db46a4fc 100644 --- a/frame/sudo/README.md +++ b/frame/sudo/README.md @@ -65,6 +65,6 @@ You need to set an initial superuser account as the sudo `key`. [`Call`]: ./enum.Call.html [`Config`]: ./trait.Config.html -[`Origin`]: https://docs.substrate.dev/docs/substrate-types +[`Origin`]: https://docs.substrate.io/v3/runtime/origins License: Apache-2.0 diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index bab93ffcee162..427455849bb00 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -89,7 +89,7 @@ //! //! * [Democracy](../pallet_democracy/index.html) //! -//! [`Origin`]: https://docs.substrate.dev/docs/substrate-types +//! 
[`Origin`]: https://docs.substrate.io/v3/runtime/origins #![cfg_attr(not(feature = "std"), no_std)] diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index b09ed65a114dc..edb0ecd6442e8 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Support code for the runtime." readme = "README.md" @@ -31,12 +31,12 @@ once_cell = { version = "1", default-features = false, optional = true } sp-state-machine = { version = "0.10.0-dev", optional = true, path = "../../primitives/state-machine" } bitflags = "1.3" impl-trait-for-tuples = "0.2.1" -smallvec = "1.4.1" +smallvec = "1.7.0" log = { version = "0.4.14", default-features = false } [dev-dependencies] assert_matches = "1.3.0" -pretty_assertions = "0.6.1" +pretty_assertions = "1.0.0" frame-system = { version = "4.0.0-dev", path = "../system" } parity-util-mem = { version = "0.10.0", default-features = false, features = ["primitive-types"] } diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index e1ff6dcf39b7e..ed152c25fc3b7 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Proc macro of Support code for the runtime." 
@@ -17,9 +17,9 @@ proc-macro = true [dependencies] frame-support-procedural-tools = { version = "4.0.0-dev", path = "./tools" } proc-macro2 = "1.0.29" -quote = "1.0.3" +quote = "1.0.10" Inflector = "0.11.4" -syn = { version = "1.0.58", features = ["full"] } +syn = { version = "1.0.80", features = ["full"] } [features] default = ["std"] diff --git a/frame/support/procedural/src/construct_runtime/expand/origin.rs b/frame/support/procedural/src/construct_runtime/expand/origin.rs index a65ad78527ff7..57adf86a9fe18 100644 --- a/frame/support/procedural/src/construct_runtime/expand/origin.rs +++ b/frame/support/procedural/src/construct_runtime/expand/origin.rs @@ -82,8 +82,9 @@ pub fn expand_outer_origin( Ok(quote! { #( #query_origin_part_macros )* - // WARNING: All instance must hold the filter `frame_system::Config::BaseCallFilter`, except - // when caller is system Root. One can use `OriginTrait::reset_filter` to do so. + /// The runtime origin type represanting the origin of a call. + /// + /// Origin is always created with the base filter configured in `frame_system::Config::BaseCallFilter`. #[derive(Clone)] pub struct Origin { caller: OriginCaller, @@ -140,7 +141,11 @@ pub fn expand_outer_origin( } fn filter_call(&self, call: &Self::Call) -> bool { - (self.filter)(call) + match self.caller { + // Root bypasses all filters + OriginCaller::system(#system_path::Origin::<#runtime>::Root) => true, + _ => (self.filter)(call), + } } fn caller(&self) -> &Self::PalletsOrigin { @@ -157,15 +162,14 @@ pub fn expand_outer_origin( } } - /// Create with system none origin and `frame-system::Config::BaseCallFilter`. fn none() -> Self { #system_path::RawOrigin::None.into() } - /// Create with system root origin and no filter. + fn root() -> Self { #system_path::RawOrigin::Root.into() } - /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. 
+ fn signed(by: <#runtime as #system_path::Config>::AccountId) -> Self { #system_path::RawOrigin::Signed(by).into() } @@ -191,7 +195,7 @@ pub fn expand_outer_origin( pub fn none() -> Self { ::none() } - /// Create with system root origin and no filter. + /// Create with system root origin and `frame-system::Config::BaseCallFilter`. pub fn root() -> Self { ::root() } @@ -221,9 +225,7 @@ pub fn expand_outer_origin( } impl From<#system_path::Origin<#runtime>> for Origin { - /// Convert to runtime origin: - /// * root origin is built with no filter - /// * others use `frame-system::Config::BaseCallFilter` + /// Convert to runtime origin, using as filter: `frame-system::Config::BaseCallFilter`. fn from(x: #system_path::Origin<#runtime>) -> Self { let o: OriginCaller = x.into(); o.into() @@ -237,10 +239,7 @@ pub fn expand_outer_origin( filter: #scrate::sp_std::rc::Rc::new(Box::new(|_| true)), }; - // Root has no filter - if !matches!(o.caller, OriginCaller::system(#system_path::Origin::<#runtime>::Root)) { - #scrate::traits::OriginTrait::reset_filter(&mut o); - } + #scrate::traits::OriginTrait::reset_filter(&mut o); o } diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 8aacd8f0aa810..04bb2ead645d2 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -132,7 +132,7 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result( } fn decl_pallet_runtime_setup( + runtime: &Ident, pallet_declarations: &[Pallet], scrate: &TokenStream2, ) -> TokenStream2 { - let names = pallet_declarations.iter().map(|d| &d.name); - let names2 = pallet_declarations.iter().map(|d| &d.name); + let names = pallet_declarations.iter().map(|d| &d.name).collect::>(); let name_strings = pallet_declarations.iter().map(|d| d.name.to_string()); + let module_names = pallet_declarations.iter().map(|d| d.path.module_name()); let indices = 
pallet_declarations.iter().map(|pallet| pallet.index as usize); + let pallet_structs = pallet_declarations + .iter() + .map(|pallet| { + let path = &pallet.path; + match pallet.instance.as_ref() { + Some(inst) => quote!(#path::Pallet<#runtime, #path::#inst>), + None => quote!(#path::Pallet<#runtime>), + } + }) + .collect::>(); quote!( /// Provides an implementation of `PalletInfo` to provide information @@ -264,13 +275,37 @@ fn decl_pallet_runtime_setup( fn name() -> Option<&'static str> { let type_id = #scrate::sp_std::any::TypeId::of::

(); #( - if type_id == #scrate::sp_std::any::TypeId::of::<#names2>() { + if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { return Some(#name_strings) } )* None } + + fn module_name() -> Option<&'static str> { + let type_id = #scrate::sp_std::any::TypeId::of::

(); + #( + if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { + return Some(#module_names) + } + )* + + None + } + + fn crate_version() -> Option<#scrate::traits::CrateVersion> { + let type_id = #scrate::sp_std::any::TypeId::of::

(); + #( + if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { + return Some( + <#pallet_structs as #scrate::traits::PalletInfoAccess>::crate_version() + ) + } + )* + + None + } } ) } diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 6f2fd82e73f4b..a0ec6dfa5803e 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -188,6 +188,18 @@ pub struct PalletPath { pub inner: Path, } +impl PalletPath { + pub fn module_name(&self) -> String { + self.inner.segments.iter().fold(String::new(), |mut acc, segment| { + if !acc.is_empty() { + acc.push_str("::"); + } + acc.push_str(&segment.ident.to_string()); + acc + }) + } +} + impl Parse for PalletPath { fn parse(input: ParseStream) -> Result { let mut lookahead = input.lookahead1(); diff --git a/frame/support/procedural/src/crate_version.rs b/frame/support/procedural/src/crate_version.rs new file mode 100644 index 0000000000000..cfa35c6190e15 --- /dev/null +++ b/frame/support/procedural/src/crate_version.rs @@ -0,0 +1,54 @@ +// This file is part of Substrate. + +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Implementation of macros related to crate versioning. 
+ +use super::get_cargo_env_var; +use frame_support_procedural_tools::generate_crate_access_2018; +use proc_macro2::{Span, TokenStream}; +use syn::{Error, Result}; + +/// Create an error that will be shown by rustc at the call site of the macro. +fn create_error(message: &str) -> Error { + Error::new(Span::call_site(), message) +} + +/// Implementation of the `crate_to_crate_version!` macro. +pub fn crate_to_crate_version(input: proc_macro::TokenStream) -> Result { + if !input.is_empty() { + return Err(create_error("No arguments expected!")) + } + + let major_version = get_cargo_env_var::("CARGO_PKG_VERSION_MAJOR") + .map_err(|_| create_error("Major version needs to fit into `u16`"))?; + + let minor_version = get_cargo_env_var::("CARGO_PKG_VERSION_MINOR") + .map_err(|_| create_error("Minor version needs to fit into `u8`"))?; + + let patch_version = get_cargo_env_var::("CARGO_PKG_VERSION_PATCH") + .map_err(|_| create_error("Patch version needs to fit into `u8`"))?; + + let crate_ = generate_crate_access_2018("frame-support")?; + + Ok(quote::quote! { + #crate_::traits::CrateVersion { + major: #major_version, + minor: #minor_version, + patch: #patch_version, + } + }) +} diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index a8ac022c35c6b..6987fc49b9a8c 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -21,6 +21,7 @@ mod clone_no_bound; mod construct_runtime; +mod crate_version; mod debug_no_bound; mod default_no_bound; mod dummy_part_checker; @@ -31,7 +32,7 @@ mod storage; mod transactional; use proc_macro::TokenStream; -use std::cell::RefCell; +use std::{cell::RefCell, str::FromStr}; pub(crate) use storage::INHERENT_INSTANCE_NAME; thread_local! { @@ -52,6 +53,16 @@ impl Counter { } } +/// Get the value from the given environment variable set by cargo. +/// +/// The value is parsed into the requested destination type. 
+fn get_cargo_env_var(version_env: &str) -> std::result::Result { + let version = std::env::var(version_env) + .unwrap_or_else(|_| panic!("`{}` is always set by cargo; qed", version_env)); + + T::from_str(&version).map_err(drop) +} + /// Declares strongly-typed wrappers around codec-compatible types in storage. /// /// ## Example @@ -462,6 +473,13 @@ pub fn require_transactional(attr: TokenStream, input: TokenStream) -> TokenStre .unwrap_or_else(|e| e.to_compile_error().into()) } +#[proc_macro] +pub fn crate_to_crate_version(input: TokenStream) -> TokenStream { + crate_version::crate_to_crate_version(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() +} + /// The number of module instances supported by the runtime, starting at index 1, /// and up to `NUMBER_OF_INSTANCE`. pub(crate) const NUMBER_OF_INSTANCE: u8 = 16; diff --git a/frame/support/procedural/src/pallet/expand/constants.rs b/frame/support/procedural/src/pallet/expand/constants.rs index 7cc245e8089df..20106c71cbf07 100644 --- a/frame/support/procedural/src/pallet/expand/constants.rs +++ b/frame/support/procedural/src/pallet/expand/constants.rs @@ -26,6 +26,8 @@ struct ConstDef { pub doc: Vec, /// default_byte implementation pub default_byte_impl: proc_macro2::TokenStream, + /// Constant name for Metadata (optional) + pub metadata_name: Option, } /// @@ -35,6 +37,7 @@ pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { let type_impl_gen = &def.type_impl_generics(proc_macro2::Span::call_site()); let type_use_gen = &def.type_use_generics(proc_macro2::Span::call_site()); let pallet_ident = &def.pallet_struct.pallet; + let trait_use_gen = &def.trait_use_generics(proc_macro2::Span::call_site()); let mut where_clauses = vec![&def.config.where_clause]; where_clauses.extend(def.extra_constants.iter().map(|d| &d.where_clause)); @@ -49,9 +52,11 @@ pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { type_: const_.type_.clone(), doc: const_.doc.clone(), default_byte_impl: 
quote::quote!( - let value = >::get(); + let value = <::#ident as + #frame_support::traits::Get<#const_type>>::get(); #frame_support::codec::Encode::encode(&value) ), + metadata_name: None, } }); @@ -66,13 +71,14 @@ pub fn expand_constants(def: &mut Def) -> proc_macro2::TokenStream { let value = >::#ident(); #frame_support::codec::Encode::encode(&value) ), + metadata_name: const_.metadata_name.clone(), } }); let consts = config_consts.chain(extra_consts).map(|const_| { let const_type = &const_.type_; - let ident = &const_.ident; - let ident_str = format!("{}", ident); + let ident_str = format!("{}", const_.metadata_name.unwrap_or(const_.ident)); + let doc = const_.doc.clone().into_iter(); let default_byte_impl = &const_.default_byte_impl; diff --git a/frame/support/procedural/src/pallet/expand/error.rs b/frame/support/procedural/src/pallet/expand/error.rs index 7a058bb32c922..c6925db07a26f 100644 --- a/frame/support/procedural/src/pallet/expand/error.rs +++ b/frame/support/procedural/src/pallet/expand/error.rs @@ -69,7 +69,7 @@ pub fn expand_error(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&error_item.attrs).is_empty() { error_item.attrs.push(syn::parse_quote!( #[doc = r" - Custom [dispatch errors](https://substrate.dev/docs/en/knowledgebase/runtime/errors) + Custom [dispatch errors](https://docs.substrate.io/v3/runtime/events-and-errors) of this pallet. 
"] )); diff --git a/frame/support/procedural/src/pallet/expand/event.rs b/frame/support/procedural/src/pallet/expand/event.rs index ebd2d7aeabaff..625c2d98baac5 100644 --- a/frame/support/procedural/src/pallet/expand/event.rs +++ b/frame/support/procedural/src/pallet/expand/event.rs @@ -98,7 +98,7 @@ pub fn expand_event(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&event_item.attrs).is_empty() { event_item.attrs.push(syn::parse_quote!( #[doc = r" - The [event](https://substrate.dev/docs/en/knowledgebase/runtime/events) emitted + The [event](https://docs.substrate.io/v3/runtime/events-and-errors) emitted by this pallet. "] )); diff --git a/frame/support/procedural/src/pallet/expand/genesis_config.rs b/frame/support/procedural/src/pallet/expand/genesis_config.rs index 4bbba2c05908e..b2eb2166165cb 100644 --- a/frame/support/procedural/src/pallet/expand/genesis_config.rs +++ b/frame/support/procedural/src/pallet/expand/genesis_config.rs @@ -89,7 +89,7 @@ pub fn expand_genesis_config(def: &mut Def) -> proc_macro2::TokenStream { attrs.push(syn::parse_quote!( #[doc = r" Can be used to configure the - [genesis state](https://substrate.dev/docs/en/knowledgebase/integrate/chain-spec#the-genesis-state) + [genesis state](https://docs.substrate.io/v3/runtime/chain-specs#the-genesis-state) of this pallet. "] )); diff --git a/frame/support/procedural/src/pallet/expand/mod.rs b/frame/support/procedural/src/pallet/expand/mod.rs index 1c8883977c765..083ad61fc5239 100644 --- a/frame/support/procedural/src/pallet/expand/mod.rs +++ b/frame/support/procedural/src/pallet/expand/mod.rs @@ -72,9 +72,9 @@ pub fn expand(mut def: Def) -> proc_macro2::TokenStream { def.item.attrs.push(syn::parse_quote!( #[doc = r" The module that hosts all the - [FRAME](https://substrate.dev/docs/en/knowledgebase/runtime/frame) + [FRAME](https://docs.substrate.io/v3/runtime/frame) types needed to add this pallet to a - [runtime](https://substrate.dev/docs/en/knowledgebase/runtime/). 
+ runtime. "] )); } diff --git a/frame/support/procedural/src/pallet/expand/pallet_struct.rs b/frame/support/procedural/src/pallet/expand/pallet_struct.rs index a217742fec55d..57e814b6b8438 100644 --- a/frame/support/procedural/src/pallet/expand/pallet_struct.rs +++ b/frame/support/procedural/src/pallet/expand/pallet_struct.rs @@ -62,7 +62,7 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { if get_doc_literals(&pallet_item.attrs).is_empty() { pallet_item.attrs.push(syn::parse_quote!( #[doc = r" - The [pallet](https://substrate.dev/docs/en/knowledgebase/runtime/pallets) implementing + The [pallet](https://docs.substrate.io/v3/runtime/frame#pallets) implementing the on-chain logic. "] )); @@ -98,28 +98,39 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { ) }; - // Depending on the flag `generate_storage_info` we use partial or full storage info from - // storage. - let (storage_info_span, storage_info_trait, storage_info_method) = - if let Some(span) = def.pallet_struct.generate_storage_info { - ( - span, - quote::quote_spanned!(span => StorageInfoTrait), - quote::quote_spanned!(span => storage_info), - ) - } else { - let span = def.pallet_struct.attr_span; - ( - span, - quote::quote_spanned!(span => PartialStorageInfoTrait), - quote::quote_spanned!(span => partial_storage_info), - ) - }; + let storage_info_span = + def.pallet_struct.generate_storage_info.unwrap_or(def.pallet_struct.attr_span); let storage_names = &def.storages.iter().map(|storage| &storage.ident).collect::>(); let storage_cfg_attrs = &def.storages.iter().map(|storage| &storage.cfg_attrs).collect::>(); + // Depending on the flag `generate_storage_info` and the storage attribute `unbounded`, we use + // partial or full storage info from storage. 
+ let storage_info_traits = &def + .storages + .iter() + .map(|storage| { + if storage.unbounded || def.pallet_struct.generate_storage_info.is_none() { + quote::quote_spanned!(storage_info_span => PartialStorageInfoTrait) + } else { + quote::quote_spanned!(storage_info_span => StorageInfoTrait) + } + }) + .collect::>(); + + let storage_info_methods = &def + .storages + .iter() + .map(|storage| { + if storage.unbounded || def.pallet_struct.generate_storage_info.is_none() { + quote::quote_spanned!(storage_info_span => partial_storage_info) + } else { + quote::quote_spanned!(storage_info_span => storage_info) + } + }) + .collect::>(); + let storage_info = quote::quote_spanned!(storage_info_span => impl<#type_impl_gen> #frame_support::traits::StorageInfoTrait for #pallet_ident<#type_use_gen> @@ -136,8 +147,8 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { { let mut storage_info = < #storage_names<#type_use_gen> - as #frame_support::traits::#storage_info_trait - >::#storage_info_method(); + as #frame_support::traits::#storage_info_traits + >::#storage_info_methods(); res.append(&mut storage_info); } )* @@ -208,6 +219,18 @@ pub fn expand_pallet_struct(def: &mut Def) -> proc_macro2::TokenStream { .expect("Pallet is part of the runtime because pallet `Config` trait is \ implemented by the runtime") } + + fn module_name() -> &'static str { + < + ::PalletInfo as #frame_support::traits::PalletInfo + >::module_name::() + .expect("Pallet is part of the runtime because pallet `Config` trait is \ + implemented by the runtime") + } + + fn crate_version() -> #frame_support::traits::CrateVersion { + #frame_support::crate_to_crate_version!() + } } #storage_info diff --git a/frame/support/procedural/src/pallet/parse/extra_constants.rs b/frame/support/procedural/src/pallet/parse/extra_constants.rs index c1324df6c22f1..a5f3c0a8c2dab 100644 --- a/frame/support/procedural/src/pallet/parse/extra_constants.rs +++ 
b/frame/support/procedural/src/pallet/parse/extra_constants.rs @@ -28,6 +28,7 @@ mod keyword { syn::custom_keyword!(compact); syn::custom_keyword!(T); syn::custom_keyword!(pallet); + syn::custom_keyword!(constant_name); } /// Definition of extra constants typically `impl Pallet { ... }` @@ -50,6 +51,29 @@ pub struct ExtraConstantDef { pub type_: syn::Type, /// The doc associated pub doc: Vec, + /// Optional MetaData Name + pub metadata_name: Option, +} + +/// Attributes for functions in extra_constants impl block. +/// Parse for `#[pallet::constant_name(ConstantName)]` +pub struct ExtraConstAttr { + metadata_name: syn::Ident, +} + +impl syn::parse::Parse for ExtraConstAttr { + fn parse(input: syn::parse::ParseStream) -> syn::Result { + input.parse::()?; + let content; + syn::bracketed!(content in input); + content.parse::()?; + content.parse::()?; + content.parse::()?; + + let metadata_name; + syn::parenthesized!(metadata_name in content); + Ok(ExtraConstAttr { metadata_name: metadata_name.parse::()? 
}) + } } impl ExtraConstantsDef { @@ -57,7 +81,10 @@ impl ExtraConstantsDef { let item = if let syn::Item::Impl(item) = item { item } else { - return Err(syn::Error::new(item.span(), "Invalid pallet::call, expected item impl")) + return Err(syn::Error::new( + item.span(), + "Invalid pallet::extra_constants, expected item impl", + )) }; let mut instances = vec![]; @@ -102,10 +129,23 @@ impl ExtraConstantsDef { syn::ReturnType::Type(_, type_) => *type_.clone(), }; + // parse metadata_name + let mut extra_constant_attrs: Vec = + helper::take_item_pallet_attrs(method)?; + + if extra_constant_attrs.len() > 1 { + let msg = + "Invalid attribute in pallet::constant_name, only one attribute is expected"; + return Err(syn::Error::new(extra_constant_attrs[1].metadata_name.span(), msg)) + } + + let metadata_name = extra_constant_attrs.pop().map(|attr| attr.metadata_name); + extra_constants.push(ExtraConstantDef { ident: method.sig.ident.clone(), type_, doc: get_doc_literals(&method.attrs), + metadata_name, }); } diff --git a/frame/support/procedural/src/pallet/parse/helper.rs b/frame/support/procedural/src/pallet/parse/helper.rs index 2590e86b58b0e..f5a7dc233cacb 100644 --- a/frame/support/procedural/src/pallet/parse/helper.rs +++ b/frame/support/procedural/src/pallet/parse/helper.rs @@ -139,6 +139,12 @@ impl MutItemAttrs for syn::ItemMod { } } +impl MutItemAttrs for syn::ImplItemMethod { + fn mut_item_attrs(&mut self) -> Option<&mut Vec> { + Some(&mut self.attrs) + } +} + /// Parse for `()` struct Unit; impl syn::parse::Parse for Unit { diff --git a/frame/support/procedural/src/pallet/parse/storage.rs b/frame/support/procedural/src/pallet/parse/storage.rs index 8075daacb6f44..cd29baf93d849 100644 --- a/frame/support/procedural/src/pallet/parse/storage.rs +++ b/frame/support/procedural/src/pallet/parse/storage.rs @@ -27,6 +27,7 @@ mod keyword { syn::custom_keyword!(pallet); syn::custom_keyword!(getter); syn::custom_keyword!(storage_prefix); + syn::custom_keyword!(unbounded); 
syn::custom_keyword!(OptionQuery); syn::custom_keyword!(ValueQuery); } @@ -34,15 +35,17 @@ mod keyword { /// Parse for one of the following: /// * `#[pallet::getter(fn dummy)]` /// * `#[pallet::storage_prefix = "CustomName"]` +/// * `#[pallet::unbounded]` pub enum PalletStorageAttr { Getter(syn::Ident, proc_macro2::Span), StorageName(syn::LitStr, proc_macro2::Span), + Unbounded(proc_macro2::Span), } impl PalletStorageAttr { fn attr_span(&self) -> proc_macro2::Span { match self { - Self::Getter(_, span) | Self::StorageName(_, span) => *span, + Self::Getter(_, span) | Self::StorageName(_, span) | Self::Unbounded(span) => *span, } } } @@ -76,12 +79,45 @@ impl syn::parse::Parse for PalletStorageAttr { })?; Ok(Self::StorageName(renamed_prefix, attr_span)) + } else if lookahead.peek(keyword::unbounded) { + content.parse::()?; + + Ok(Self::Unbounded(attr_span)) } else { Err(lookahead.error()) } } } +struct PalletStorageAttrInfo { + getter: Option, + rename_as: Option, + unbounded: bool, +} + +impl PalletStorageAttrInfo { + fn from_attrs(attrs: Vec) -> syn::Result { + let mut getter = None; + let mut rename_as = None; + let mut unbounded = false; + for attr in attrs { + match attr { + PalletStorageAttr::Getter(ident, ..) if getter.is_none() => getter = Some(ident), + PalletStorageAttr::StorageName(name, ..) if rename_as.is_none() => + rename_as = Some(name), + PalletStorageAttr::Unbounded(..) if !unbounded => unbounded = true, + attr => + return Err(syn::Error::new( + attr.attr_span(), + "Invalid attribute: Duplicate attribute", + )), + } + } + + Ok(PalletStorageAttrInfo { getter, rename_as, unbounded }) + } +} + /// The value and key types used by storages. Needed to expand metadata. pub enum Metadata { Value { value: syn::Type }, @@ -131,6 +167,8 @@ pub struct StorageDef { /// generics of the storage. /// If generics are not named, this is none. pub named_generics: Option, + /// If the value stored in this storage is unbounded. 
+ pub unbounded: bool, } /// The parsed generic from the @@ -629,25 +667,8 @@ impl StorageDef { }; let attrs: Vec = helper::take_item_pallet_attrs(&mut item.attrs)?; - let (mut getters, mut names) = attrs - .into_iter() - .partition::, _>(|attr| matches!(attr, PalletStorageAttr::Getter(..))); - if getters.len() > 1 { - let msg = "Invalid pallet::storage, multiple argument pallet::getter found"; - return Err(syn::Error::new(getters[1].attr_span(), msg)) - } - if names.len() > 1 { - let msg = "Invalid pallet::storage, multiple argument pallet::storage_prefix found"; - return Err(syn::Error::new(names[1].attr_span(), msg)) - } - let getter = getters.pop().map(|attr| match attr { - PalletStorageAttr::Getter(ident, _) => ident, - _ => unreachable!(), - }); - let rename_as = names.pop().map(|attr| match attr { - PalletStorageAttr::StorageName(lit, _) => lit, - _ => unreachable!(), - }); + let PalletStorageAttrInfo { getter, rename_as, unbounded } = + PalletStorageAttrInfo::from_attrs(attrs)?; let cfg_attrs = helper::get_item_cfg_attrs(&item.attrs); @@ -704,6 +725,7 @@ impl StorageDef { where_clause, cfg_attrs, named_generics, + unbounded, }) } } diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index ee59f53287efa..6a8fb57b39bda 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Proc macro helpers for procedural macros" @@ -14,6 +14,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] frame-support-procedural-tools-derive = { version = "3.0.0", path = "./derive" } proc-macro2 = "1.0.29" -quote = "1.0.3" -syn = { version = "1.0.58", features = ["full", "visit", "extra-traits"] } +quote = "1.0.10" +syn = { version = 
"1.0.80", features = ["full", "visit", "extra-traits"] } proc-macro-crate = "1.0.0" diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index 12ec6a69f3967..9f4e2d9dca2a5 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -4,7 +4,7 @@ version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Use to derive parsing for parsing struct." @@ -16,5 +16,5 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.29" -quote = { version = "1.0.3", features = ["proc-macro"] } -syn = { version = "1.0.58", features = ["proc-macro" ,"full", "extra-traits", "parsing"] } +quote = { version = "1.0.10", features = ["proc-macro"] } +syn = { version = "1.0.80", features = ["proc-macro" ,"full", "extra-traits", "parsing"] } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 2e6777fee2af2..6dc7fb8a94cae 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -280,7 +280,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug + /// /// The following are reserved function signatures: /// -/// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.dev/docs/event-enum). +/// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.io/v3/runtime/events-and-errors). /// The default behavior is to call `deposit_event` from the [System /// module](../frame_system/index.html). However, you can write your own implementation for events /// in your runtime. To use the default behavior, add `fn deposit_event() = default;` to your @@ -2151,6 +2151,18 @@ macro_rules! 
decl_module { .expect("Pallet is part of the runtime because pallet `Config` trait is \ implemented by the runtime") } + + fn module_name() -> &'static str { + < + <$trait_instance as $system::Config>::PalletInfo as $crate::traits::PalletInfo + >::module_name::() + .expect("Pallet is part of the runtime because pallet `Config` trait is \ + implemented by the runtime") + } + + fn crate_version() -> $crate::traits::CrateVersion { + $crate::crate_to_crate_version!() + } } // Implement GetCallName for the Call. @@ -2529,8 +2541,8 @@ mod tests { use crate::{ metadata::*, traits::{ - Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, OnInitialize, OnRuntimeUpgrade, - PalletInfo, + CrateVersion, Get, GetCallName, IntegrityTest, OnFinalize, OnIdle, OnInitialize, + OnRuntimeUpgrade, PalletInfo, }, weights::{DispatchClass, DispatchInfo, Pays, RuntimeDbWeight}, }; @@ -2631,6 +2643,22 @@ mod tests { return Some("Test") } + None + } + fn module_name() -> Option<&'static str> { + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::() { + return Some("tests") + } + + None + } + fn crate_version() -> Option { + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::() { + return Some(frame_support::crate_to_crate_version!()) + } + None } } diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 459698707366d..f3b00c764bb35 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -702,6 +702,21 @@ pub use frame_support_procedural::DefaultNoBound; /// ``` pub use frame_support_procedural::require_transactional; +/// Convert the current crate version into a [`CrateVersion`](crate::traits::CrateVersion). +/// +/// It uses the `CARGO_PKG_VERSION_MAJOR`, `CARGO_PKG_VERSION_MINOR` and +/// `CARGO_PKG_VERSION_PATCH` environment variables to fetch the crate version. +/// This means that the [`CrateVersion`](crate::traits::CrateVersion) +/// object will correspond to the version of the crate the macro is called in! +/// +/// # Example +/// +/// ``` +/// # use frame_support::{traits::CrateVersion, crate_to_crate_version}; +/// const Version: CrateVersion = crate_to_crate_version!(); +/// ``` +pub use frame_support_procedural::crate_to_crate_version; + /// Return Err of the expression: `return Err($expression);`. /// /// Used as `fail!(expression)`. @@ -819,6 +834,7 @@ pub mod tests { StorageHasher, }; use codec::{Codec, EncodeLike}; + use frame_support::traits::CrateVersion; use sp_io::TestExternalities; use sp_std::result; @@ -832,6 +848,12 @@ pub mod tests { fn name() -> Option<&'static str> { unimplemented!("PanicPalletInfo mustn't be triggered by tests"); } + fn module_name() -> Option<&'static str> { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } + fn crate_version() -> Option { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } } pub trait Config: 'static { @@ -1411,15 +1433,17 @@ pub mod pallet_prelude { /// `::Foo`. 
/// /// To generate the full storage info (used for PoV calculation) use the attribute -/// `#[pallet::set_storage_max_encoded_len]`, e.g.: +/// `#[pallet::generate_storage_info]`, e.g.: /// ```ignore /// #[pallet::pallet] -/// #[pallet::set_storage_max_encoded_len] +/// #[pallet::generate_storage_info] /// pub struct Pallet(_); /// ``` /// /// This require all storage to implement the trait [`traits::StorageInfoTrait`], thus all keys /// and value types must bound [`pallet_prelude::MaxEncodedLen`]. +/// Some individual storage can opt-out from this constraint by using `#[pallet::unbounded]`, +/// see `#[pallet::storage]` documentation. /// /// As the macro implements [`traits::GetStorageVersion`], the current storage version needs to /// be communicated to the macro. This can be done by using the `storage_version` attribute: @@ -1721,6 +1745,11 @@ pub mod pallet_prelude { /// pub(super) type MyStorage = StorageMap<_, Blake2_128Concat, u32, u32>; /// ``` /// +/// The optional attribute `#[pallet::unbounded]` allows to declare the storage as unbounded. +/// When implementating the storage info (when #[pallet::generate_storage_info]` is specified +/// on the pallet struct placeholder), the size of the storage will be declared as unbounded. +/// This can be useful for storage which can never go into PoV (Proof of Validity). +/// /// The optional attributes `#[cfg(..)]` allow conditional compilation for the storage. 
/// /// E.g: diff --git a/frame/support/src/storage/bounded_btree_map.rs b/frame/support/src/storage/bounded_btree_map.rs index d0c0aa7c4f155..404814cb81693 100644 --- a/frame/support/src/storage/bounded_btree_map.rs +++ b/frame/support/src/storage/bounded_btree_map.rs @@ -20,7 +20,7 @@ use crate::{storage::StorageDecodeLength, traits::Get}; use codec::{Decode, Encode, MaxEncodedLen}; use sp_std::{ - borrow::Borrow, collections::btree_map::BTreeMap, convert::TryFrom, fmt, marker::PhantomData, + borrow::Borrow, collections::btree_map::BTreeMap, convert::TryFrom, marker::PhantomData, ops::Deref, }; @@ -173,12 +173,12 @@ where } #[cfg(feature = "std")] -impl fmt::Debug for BoundedBTreeMap +impl std::fmt::Debug for BoundedBTreeMap where - BTreeMap: fmt::Debug, + BTreeMap: std::fmt::Debug, S: Get, { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("BoundedBTreeMap").field(&self.0).field(&Self::bound()).finish() } } diff --git a/frame/support/src/storage/bounded_btree_set.rs b/frame/support/src/storage/bounded_btree_set.rs index 182884e655dd2..ecfb0bdbd261f 100644 --- a/frame/support/src/storage/bounded_btree_set.rs +++ b/frame/support/src/storage/bounded_btree_set.rs @@ -20,7 +20,7 @@ use crate::{storage::StorageDecodeLength, traits::Get}; use codec::{Decode, Encode, MaxEncodedLen}; use sp_std::{ - borrow::Borrow, collections::btree_set::BTreeSet, convert::TryFrom, fmt, marker::PhantomData, + borrow::Borrow, collections::btree_set::BTreeSet, convert::TryFrom, marker::PhantomData, ops::Deref, }; @@ -31,7 +31,8 @@ use sp_std::{ /// /// Unlike a standard `BTreeSet`, there is an enforced upper limit to the number of items in the /// set. All internal operations ensure this bound is respected. 
-#[derive(Encode)] +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] pub struct BoundedBTreeSet(BTreeSet, PhantomData); impl Decode for BoundedBTreeSet @@ -157,12 +158,12 @@ where } #[cfg(feature = "std")] -impl fmt::Debug for BoundedBTreeSet +impl std::fmt::Debug for BoundedBTreeSet where - BTreeSet: fmt::Debug, + BTreeSet: std::fmt::Debug, S: Get, { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("BoundedBTreeSet").field(&self.0).field(&Self::bound()).finish() } } diff --git a/frame/support/src/storage/bounded_vec.rs b/frame/support/src/storage/bounded_vec.rs index b45c294f8d4a4..e51c6cd734113 100644 --- a/frame/support/src/storage/bounded_vec.rs +++ b/frame/support/src/storage/bounded_vec.rs @@ -28,7 +28,7 @@ use core::{ ops::{Deref, Index, IndexMut}, slice::SliceIndex, }; -use sp_std::{convert::TryFrom, fmt, marker::PhantomData, prelude::*}; +use sp_std::{convert::TryFrom, marker::PhantomData, prelude::*}; /// A bounded vector. /// @@ -44,7 +44,8 @@ pub struct BoundedVec(Vec, PhantomData); /// A bounded slice. /// /// Similar to a `BoundedVec`, but not owned and cannot be decoded. 
-#[derive(Encode)] +#[derive(Encode, scale_info::TypeInfo)] +#[scale_info(skip_type_params(S))] pub struct BoundedSlice<'a, T, S>(&'a [T], PhantomData); // `BoundedSlice`s encode to something which will always decode into a `BoundedVec`, @@ -200,13 +201,12 @@ impl Default for BoundedVec { } } -#[cfg(feature = "std")] -impl fmt::Debug for BoundedVec +impl sp_std::fmt::Debug for BoundedVec where - T: fmt::Debug, + T: sp_std::fmt::Debug, S: Get, { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { f.debug_tuple("BoundedVec").field(&self.0).field(&Self::bound()).finish() } } @@ -337,7 +337,7 @@ where fn max_encoded_len() -> usize { // BoundedVec encodes like Vec which encodes like [T], which is a compact u32 // plus each item in the slice: - // https://substrate.dev/rustdocs/v3.0.0/src/parity_scale_codec/codec.rs.html#798-808 + // https://docs.substrate.io/v3/advanced/scale-codec codec::Compact(S::get()) .encoded_size() .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) diff --git a/frame/support/src/storage/weak_bounded_vec.rs b/frame/support/src/storage/weak_bounded_vec.rs index 9c30c45c3e2e1..823c50c55d0b9 100644 --- a/frame/support/src/storage/weak_bounded_vec.rs +++ b/frame/support/src/storage/weak_bounded_vec.rs @@ -27,7 +27,7 @@ use core::{ ops::{Deref, Index, IndexMut}, slice::SliceIndex, }; -use sp_std::{convert::TryFrom, fmt, marker::PhantomData, prelude::*}; +use sp_std::{convert::TryFrom, marker::PhantomData, prelude::*}; /// A weakly bounded vector. 
/// @@ -171,12 +171,12 @@ impl Default for WeakBoundedVec { } #[cfg(feature = "std")] -impl fmt::Debug for WeakBoundedVec +impl std::fmt::Debug for WeakBoundedVec where - T: fmt::Debug, + T: std::fmt::Debug, S: Get, { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("WeakBoundedVec").field(&self.0).field(&Self::bound()).finish() } } @@ -307,7 +307,7 @@ where fn max_encoded_len() -> usize { // WeakBoundedVec encodes like Vec which encodes like [T], which is a compact u32 // plus each item in the slice: - // https://substrate.dev/rustdocs/v3.0.0/src/parity_scale_codec/codec.rs.html#798-808 + // https://docs.substrate.io/v3/advanced/scale-codec codec::Compact(S::get()) .encoded_size() .saturating_add(Self::bound().saturating_mul(T::max_encoded_len())) diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index d5d0decd117eb..5ac0208dc2033 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -62,8 +62,8 @@ pub use randomness::Randomness; mod metadata; pub use metadata::{ - CallMetadata, GetCallMetadata, GetCallName, GetStorageVersion, PalletInfo, PalletInfoAccess, - StorageVersion, STORAGE_VERSION_STORAGE_KEY_POSTFIX, + CallMetadata, CrateVersion, GetCallMetadata, GetCallName, GetStorageVersion, PalletInfo, + PalletInfoAccess, StorageVersion, STORAGE_VERSION_STORAGE_KEY_POSTFIX, }; mod hooks; diff --git a/frame/support/src/traits/dispatch.rs b/frame/support/src/traits/dispatch.rs index f82628ede18cc..92b832ba32961 100644 --- a/frame/support/src/traits/dispatch.rs +++ b/frame/support/src/traits/dispatch.rs @@ -70,7 +70,10 @@ pub trait OriginTrait: Sized { /// Replace the caller with caller from the other origin fn set_caller_from(&mut self, other: impl Into); - /// Filter the call, if false then call is filtered out. + /// Filter the call if caller is not root, if false is returned then the call must be filtered + /// out. 
+ /// + /// For root origin caller, the filters are bypassed and true is returned. fn filter_call(&self, call: &Self::Call) -> bool; /// Get the caller. @@ -82,12 +85,12 @@ pub trait OriginTrait: Sized { f: impl FnOnce(Self::PalletsOrigin) -> Result, ) -> Result; - /// Create with system none origin and `frame-system::Config::BaseCallFilter`. + /// Create with system none origin and `frame_system::Config::BaseCallFilter`. fn none() -> Self; - /// Create with system root origin and no filter. + /// Create with system root origin and `frame_system::Config::BaseCallFilter`. fn root() -> Self; - /// Create with system signed origin and `frame-system::Config::BaseCallFilter`. + /// Create with system signed origin and `frame_system::Config::BaseCallFilter`. fn signed(by: Self::AccountId) -> Self; } diff --git a/frame/support/src/traits/hooks.rs b/frame/support/src/traits/hooks.rs index adba88e5acbf3..2a8b0a156247a 100644 --- a/frame/support/src/traits/hooks.rs +++ b/frame/support/src/traits/hooks.rs @@ -19,7 +19,7 @@ use impl_trait_for_tuples::impl_for_tuples; use sp_arithmetic::traits::Saturating; -use sp_runtime::traits::{AtLeast32BitUnsigned, MaybeSerializeDeserialize}; +use sp_runtime::traits::AtLeast32BitUnsigned; /// The block initialization trait. /// @@ -294,7 +294,7 @@ pub trait Hooks { /// A trait to define the build function of a genesis config, T and I are placeholder for pallet /// trait and pallet instance. #[cfg(feature = "std")] -pub trait GenesisBuild: Default + MaybeSerializeDeserialize { +pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeDeserialize { /// The build function is called within an externalities allowing storage APIs. /// Thus one can write to storage using regular pallet storages. 
fn build(&self); diff --git a/frame/support/src/traits/metadata.rs b/frame/support/src/traits/metadata.rs index e877f29e0a137..e60cf8be8a41c 100644 --- a/frame/support/src/traits/metadata.rs +++ b/frame/support/src/traits/metadata.rs @@ -20,7 +20,7 @@ use codec::{Decode, Encode}; use sp_runtime::RuntimeDebug; -/// Provides information about the pallet setup in the runtime. +/// Provides information about the pallet itself and its setup in the runtime. /// /// An implementor should be able to provide information about each pallet that /// is configured in `construct_runtime!`. @@ -29,16 +29,25 @@ pub trait PalletInfo { fn index() -> Option; /// Convert the given pallet `P` into its name as configured in the runtime. fn name() -> Option<&'static str>; + /// Convert the given pallet `P` into its Rust module name as used in `construct_runtime!`. + fn module_name() -> Option<&'static str>; + /// Convert the given pallet `P` into its containing crate version. + fn crate_version() -> Option; } -/// Provides information about the pallet setup in the runtime. +/// Provides information about the pallet itself and its setup in the runtime. /// -/// Access the information provided by [`PalletInfo`] for a specific pallet. +/// Declare some information and access the information provided by [`PalletInfo`] for a specific +/// pallet. pub trait PalletInfoAccess { /// Index of the pallet as configured in the runtime. fn index() -> usize; /// Name of the pallet as configured in the runtime. fn name() -> &'static str; + /// Name of the Rust module containing the pallet. + fn module_name() -> &'static str; + /// Version of the crate containing the pallet. + fn crate_version() -> CrateVersion; } /// The function and pallet name of the Call. @@ -68,6 +77,37 @@ pub trait GetCallMetadata { fn get_call_metadata(&self) -> CallMetadata; } +/// The version of a crate. 
+#[derive(RuntimeDebug, Eq, PartialEq, Encode, Decode, Clone, Copy, Default)] +pub struct CrateVersion { + /// The major version of the crate. + pub major: u16, + /// The minor version of the crate. + pub minor: u8, + /// The patch version of the crate. + pub patch: u8, +} + +impl CrateVersion { + pub const fn new(major: u16, minor: u8, patch: u8) -> Self { + Self { major, minor, patch } + } +} + +impl sp_std::cmp::Ord for CrateVersion { + fn cmp(&self, other: &Self) -> sp_std::cmp::Ordering { + self.major + .cmp(&other.major) + .then_with(|| self.minor.cmp(&other.minor).then_with(|| self.patch.cmp(&other.patch))) + } +} + +impl sp_std::cmp::PartialOrd for CrateVersion { + fn partial_cmp(&self, other: &Self) -> Option { + Some(::cmp(&self, other)) + } +} + /// The storage key postfix that is used to store the [`StorageVersion`] per pallet. /// /// The full storage key is built by using: diff --git a/frame/support/src/traits/misc.rs b/frame/support/src/traits/misc.rs index 75f2f8ac3fef1..9109bfeeae722 100644 --- a/frame/support/src/traits/misc.rs +++ b/frame/support/src/traits/misc.rs @@ -17,10 +17,11 @@ //! Smaller traits used in FRAME which don't need their own file. -use crate::{dispatch::Parameter, TypeInfo}; +use crate::dispatch::Parameter; use codec::{Decode, Encode, EncodeLike, Input, MaxEncodedLen}; +use scale_info::{build::Fields, meta_type, Path, Type, TypeInfo, TypeParameter}; use sp_runtime::{traits::Block as BlockT, DispatchError}; -use sp_std::vec::Vec; +use sp_std::prelude::*; /// Anything that can have a `::len()` method. pub trait Len { @@ -384,7 +385,7 @@ impl, const T: u32> EstimateCallFee for /// /// The encoding is the encoding of `T` prepended with the compact encoding of its size in bytes. /// Thus the encoded value can be decoded as a `Vec`. 
-#[derive(Debug, Eq, PartialEq, Default, Clone, MaxEncodedLen, TypeInfo)] +#[derive(Debug, Eq, PartialEq, Default, Clone)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct WrapperOpaque(pub T); @@ -392,8 +393,7 @@ impl EncodeLike for WrapperOpaque {} impl Encode for WrapperOpaque { fn size_hint(&self) -> usize { - // Compact usually takes at most 4 bytes - self.0.size_hint().saturating_add(4) + self.0.size_hint().saturating_add(>::max_encoded_len()) } fn encode_to(&self, dest: &mut O) { @@ -424,3 +424,68 @@ impl From for WrapperOpaque { Self(t) } } + +impl MaxEncodedLen for WrapperOpaque { + fn max_encoded_len() -> usize { + let t_max_len = T::max_encoded_len(); + + // See scale encoding https://docs.substrate.io/v3/advanced/scale-codec + if t_max_len < 64 { + t_max_len + 1 + } else if t_max_len < 2usize.pow(14) { + t_max_len + 2 + } else if t_max_len < 2usize.pow(30) { + t_max_len + 4 + } else { + >::max_encoded_len().saturating_add(T::max_encoded_len()) + } + } +} + +impl TypeInfo for WrapperOpaque { + type Identity = Self; + fn type_info() -> Type { + Type::builder() + .path(Path::new("WrapperOpaque", module_path!())) + .type_params(vec![TypeParameter::new("T", Some(meta_type::()))]) + .composite( + Fields::unnamed() + .field(|f| f.compact::()) + .field(|f| f.ty::().type_name("T")), + ) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_opaque_wrapper() { + let encoded = WrapperOpaque(3u32).encode(); + assert_eq!(encoded, [codec::Compact(4u32).encode(), 3u32.to_le_bytes().to_vec()].concat()); + let vec_u8 = >::decode(&mut &encoded[..]).unwrap(); + let decoded_from_vec_u8 = u32::decode(&mut &vec_u8[..]).unwrap(); + assert_eq!(decoded_from_vec_u8, 3u32); + let decoded = >::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded.0, 3u32); + + assert_eq!(>::max_encoded_len(), 63 + 1); + assert_eq!( + >::max_encoded_len(), + WrapperOpaque([0u8; 63]).encode().len() + ); + + assert_eq!(>::max_encoded_len(), 64 
+ 2); + assert_eq!( + >::max_encoded_len(), + WrapperOpaque([0u8; 64]).encode().len() + ); + + assert_eq!( + >::max_encoded_len(), + 2usize.pow(14) - 1 + 2 + ); + assert_eq!(>::max_encoded_len(), 2usize.pow(14) + 4); + } +} diff --git a/frame/support/src/traits/schedule.rs b/frame/support/src/traits/schedule.rs index a4a4f9c03ab12..19f50a93c0681 100644 --- a/frame/support/src/traits/schedule.rs +++ b/frame/support/src/traits/schedule.rs @@ -18,6 +18,7 @@ //! Traits and associated utilities for scheduling dispatchables in FRAME. use codec::{Codec, Decode, Encode, EncodeLike}; +use scale_info::TypeInfo; use sp_runtime::{DispatchError, RuntimeDebug}; use sp_std::{fmt::Debug, prelude::*}; @@ -31,7 +32,7 @@ pub type Period = (BlockNumber, u32); pub type Priority = u8; /// The dispatch time of a scheduled task. -#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, TypeInfo)] pub enum DispatchTime { /// At specified block. 
At(BlockNumber), diff --git a/frame/support/src/traits/tokens.rs b/frame/support/src/traits/tokens.rs index aca62bcad65c7..91a9382d07fcc 100644 --- a/frame/support/src/traits/tokens.rs +++ b/frame/support/src/traits/tokens.rs @@ -26,6 +26,6 @@ pub mod nonfungible; pub mod nonfungibles; pub use imbalance::Imbalance; pub use misc::{ - BalanceConversion, BalanceStatus, DepositConsequence, ExistenceRequirement, + AssetId, Balance, BalanceConversion, BalanceStatus, DepositConsequence, ExistenceRequirement, WithdrawConsequence, WithdrawReasons, }; diff --git a/frame/support/src/traits/tokens/fungibles.rs b/frame/support/src/traits/tokens/fungibles.rs index 3f5a1c75860c2..b164a99671658 100644 --- a/frame/support/src/traits/tokens/fungibles.rs +++ b/frame/support/src/traits/tokens/fungibles.rs @@ -23,8 +23,11 @@ use super::{ }; use crate::dispatch::{DispatchError, DispatchResult}; use sp_runtime::traits::Saturating; +use sp_std::vec::Vec; +pub mod approvals; mod balanced; +pub mod metadata; pub use balanced::{Balanced, Unbalanced}; mod imbalance; pub use imbalance::{CreditOf, DebtOf, HandleImbalanceDrop, Imbalance}; @@ -65,6 +68,18 @@ pub trait Inspect { ) -> WithdrawConsequence; } +/// Trait for reading metadata from a fungible asset. +pub trait InspectMetadata: Inspect { + /// Return the name of an asset. + fn name(asset: &Self::AssetId) -> Vec; + + /// Return the symbol of an asset. + fn symbol(asset: &Self::AssetId) -> Vec; + + /// Return the decimals of an asset. + fn decimals(asset: &Self::AssetId) -> u8; +} + /// Trait for providing a set of named fungible assets which can be created and destroyed. pub trait Mutate: Inspect { /// Attempt to increase the `asset` balance of `who` by `amount`. @@ -227,3 +242,39 @@ impl + MutateHold> BalancedHold>::slash(asset, who, actual) } } + +/// Trait for providing the ability to create new fungible assets. +pub trait Create: Inspect { + /// Create a new fungible asset. 
+ fn create( + id: Self::AssetId, + admin: AccountId, + is_sufficient: bool, + min_balance: Self::Balance, + ) -> DispatchResult; +} + +/// Trait for providing the ability to destroy existing fungible assets. +pub trait Destroy: Inspect { + /// The witness data needed to destroy an asset. + type DestroyWitness; + + /// Provide the appropriate witness data needed to destroy an asset. + fn get_destroy_witness(id: &Self::AssetId) -> Option; + + /// Destroy an existing fungible asset. + /// * `id`: The `AssetId` to be destroyed. + /// * `witness`: Any witness data that needs to be provided to complete the operation + /// successfully. + /// * `maybe_check_owner`: An optional account id that can be used to authorize the destroy + /// command. If not provided, we will not do any authorization checks before destroying the + /// asset. + /// + /// If successful, this function will return the actual witness data from the destroyed asset. + /// This may be different than the witness data provided, and can be used to refund weight. + fn destroy( + id: Self::AssetId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result; +} diff --git a/frame/support/src/traits/tokens/fungibles/approvals.rs b/frame/support/src/traits/tokens/fungibles/approvals.rs new file mode 100644 index 0000000000000..06e81b8591a80 --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/approvals.rs @@ -0,0 +1,43 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Inspect and Mutate traits for Asset approvals + +use crate::dispatch::DispatchResult; +pub trait Inspect: super::Inspect { + // Check the amount approved by an owner to be spent by a delegate + fn allowance(asset: Self::AssetId, owner: &AccountId, delegate: &AccountId) -> Self::Balance; +} + +pub trait Mutate: Inspect { + // Aprove a delegate account to spend an amount of tokens owned by an owner + fn approve( + asset: Self::AssetId, + owner: &AccountId, + delegate: &AccountId, + amount: Self::Balance, + ) -> DispatchResult; + + // Transfer from a delegate account an amount approved by the owner of the asset + fn transfer_from( + asset: Self::AssetId, + owner: &AccountId, + delegate: &AccountId, + dest: &AccountId, + amount: Self::Balance, + ) -> DispatchResult; +} diff --git a/frame/support/src/traits/tokens/fungibles/metadata.rs b/frame/support/src/traits/tokens/fungibles/metadata.rs new file mode 100644 index 0000000000000..08bf5c4710a7d --- /dev/null +++ b/frame/support/src/traits/tokens/fungibles/metadata.rs @@ -0,0 +1,41 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Inspect and Mutate traits for Asset metadata + +use crate::dispatch::DispatchResult; +use sp_std::vec::Vec; + +pub trait Inspect: super::Inspect { + // Get name for an AssetId. + fn name(asset: Self::AssetId) -> Vec; + // Get symbol for an AssetId. + fn symbol(asset: Self::AssetId) -> Vec; + // Get decimals for an AssetId. + fn decimals(asset: Self::AssetId) -> u8; +} + +pub trait Mutate: Inspect { + // Set name, symbol and decimals for a given assetId. + fn set( + asset: Self::AssetId, + from: &AccountId, + name: Vec, + symbol: Vec, + decimals: u8, + ) -> DispatchResult; +} diff --git a/frame/support/src/traits/tokens/misc.rs b/frame/support/src/traits/tokens/misc.rs index 214c28708a196..100138171abe7 100644 --- a/frame/support/src/traits/tokens/misc.rs +++ b/frame/support/src/traits/tokens/misc.rs @@ -161,8 +161,8 @@ impl WithdrawReasons { } /// Simple amalgamation trait to collect together properties for an AssetId under one roof. -pub trait AssetId: FullCodec + Copy + Eq + PartialEq + Debug {} -impl AssetId for T {} +pub trait AssetId: FullCodec + Copy + Eq + PartialEq + Debug + scale_info::TypeInfo {} +impl AssetId for T {} /// Simple amalgamation trait to collect together properties for a Balance under one roof. pub trait Balance: diff --git a/frame/support/src/traits/tokens/nonfungibles.rs b/frame/support/src/traits/tokens/nonfungibles.rs index 452ee2212d62a..b5a14761064f3 100644 --- a/frame/support/src/traits/tokens/nonfungibles.rs +++ b/frame/support/src/traits/tokens/nonfungibles.rs @@ -27,7 +27,7 @@ //! 
Implementations of these traits may be converted to implementations of corresponding //! `nonfungible` traits by using the `nonfungible::ItemOf` type adapter. -use crate::dispatch::DispatchResult; +use crate::dispatch::{DispatchError, DispatchResult}; use codec::{Decode, Encode}; use sp_runtime::TokenError; use sp_std::prelude::*; @@ -123,6 +123,31 @@ pub trait Create: Inspect { fn create_class(class: &Self::ClassId, who: &AccountId, admin: &AccountId) -> DispatchResult; } +/// Trait for providing the ability to destroy classes of nonfungible assets. +pub trait Destroy: Inspect { + /// The witness data needed to destroy an asset. + type DestroyWitness; + + /// Provide the appropriate witness data needed to destroy an asset. + fn get_destroy_witness(class: &Self::ClassId) -> Option; + + /// Destroy an existing fungible asset. + /// * `class`: The `ClassId` to be destroyed. + /// * `witness`: Any witness data that needs to be provided to complete the operation + /// successfully. + /// * `maybe_check_owner`: An optional account id that can be used to authorize the destroy + /// command. If not provided, we will not do any authorization checks before destroying the + /// asset. + /// + /// If successful, this function will return the actual witness data from the destroyed asset. + /// This may be different than the witness data provided, and can be used to refund weight. + fn destroy( + class: Self::ClassId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result; +} + /// Trait for providing an interface for multiple classes of NFT-like assets which may be minted, /// burned and/or have attributes set on them. 
pub trait Mutate: Inspect { diff --git a/frame/support/src/traits/validation.rs b/frame/support/src/traits/validation.rs index 11ea5a79f67ba..674f2d718fffa 100644 --- a/frame/support/src/traits/validation.rs +++ b/frame/support/src/traits/validation.rs @@ -109,7 +109,7 @@ pub trait OneSessionHandler: BoundToRuntimeAppPublic { fn on_before_session_ending() {} /// A validator got disabled. Act accordingly until a new session begins. - fn on_disabled(_validator_index: usize); + fn on_disabled(_validator_index: u32); } /// Something that can estimate at which block the next session rotation will happen (i.e. a new diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 115470a9bf034..ec5f37823ad47 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -287,30 +287,6 @@ impl<'a> OneOrMany for &'a [DispatchClass] { } } -/// Primitives related to priority management of Frame. -pub mod priority { - /// The starting point of all Operational transactions. 3/4 of u64::MAX. - pub const LIMIT: u64 = 13_835_058_055_282_163_711_u64; - - /// Wrapper for priority of different dispatch classes. - /// - /// This only makes sure that any value created for the operational dispatch class is - /// incremented by [`LIMIT`]. - pub enum FrameTransactionPriority { - Normal(u64), - Operational(u64), - } - - impl From for u64 { - fn from(priority: FrameTransactionPriority) -> Self { - match priority { - FrameTransactionPriority::Normal(inner) => inner, - FrameTransactionPriority::Operational(inner) => inner.saturating_add(LIMIT), - } - } - } -} - /// A bundle of static information collected from the `#[weight = $x]` attributes. 
#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode, TypeInfo)] pub struct DispatchInfo { diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 863afceac4a98..77fd4f5620969 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" publish = false -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] @@ -24,7 +24,7 @@ sp-core = { version = "4.0.0-dev", default-features = false, path = "../../../pr sp-std = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/std" } sp-version = { version = "4.0.0-dev", default-features = false, path = "../../../primitives/version" } trybuild = "1.0.43" -pretty_assertions = "0.6.1" +pretty_assertions = "1.0.0" rustversion = "1.0.0" frame-system = { version = "4.0.0-dev", default-features = false, path = "../../system" } # The "std" feature for this pallet is never activated on purpose, in order to test construct_runtime error message diff --git a/frame/support/test/pallet/Cargo.toml b/frame/support/test/pallet/Cargo.toml index 35eb4f34acae1..a3d101967ae65 100644 --- a/frame/support/test/pallet/Cargo.toml +++ b/frame/support/test/pallet/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" publish = false -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] diff --git a/frame/support/test/src/lib.rs b/frame/support/test/src/lib.rs index 52c0a6270d47f..073f8c9c19352 100644 --- a/frame/support/test/src/lib.rs +++ b/frame/support/test/src/lib.rs @@ -49,6 +49,12 @@ impl frame_support::traits::PalletInfo for PanicPalletInfo { fn name() -> Option<&'static str> { unimplemented!("PanicPalletInfo mustn't be 
triggered by tests"); } + fn module_name() -> Option<&'static str> { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } + fn crate_version() -> Option { + unimplemented!("PanicPalletInfo mustn't be triggered by tests"); + } } /// Provides an implementation of [`frame_support::traits::Randomness`] that should only be used in diff --git a/frame/support/test/tests/construct_runtime.rs b/frame/support/test/tests/construct_runtime.rs index 062993fe10fbb..2d14da04f64b7 100644 --- a/frame/support/test/tests/construct_runtime.rs +++ b/frame/support/test/tests/construct_runtime.rs @@ -21,7 +21,7 @@ #![recursion_limit = "128"] -use frame_support::traits::PalletInfo as _; +use frame_support::traits::{CrateVersion, PalletInfo as _}; use scale_info::TypeInfo; use sp_core::{sr25519, H256}; use sp_runtime::{ @@ -327,19 +327,31 @@ mod origin_test { assert_eq!(Origin::from(super::nested::module3::Origin).filter_call(&rejected_call), false); let mut origin = Origin::from(Some(0)); - origin.add_filter(|c| matches!(c, Call::Module3(_))); assert_eq!(origin.filter_call(&accepted_call), false); assert_eq!(origin.filter_call(&rejected_call), false); + // Now test for root origin and filters: + let mut origin = Origin::from(Some(0)); origin.set_caller_from(Origin::root()); assert!(matches!(origin.caller, OriginCaller::system(super::system::RawOrigin::Root))); - assert_eq!(origin.filter_call(&accepted_call), false); + + // Root origin bypass all filter. + assert_eq!(origin.filter_call(&accepted_call), true); + assert_eq!(origin.filter_call(&rejected_call), true); + + origin.set_caller_from(Origin::from(Some(0))); + + // Back to another signed origin, the filtered are now effective again + assert_eq!(origin.filter_call(&accepted_call), true); assert_eq!(origin.filter_call(&rejected_call), false); + origin.set_caller_from(Origin::root()); origin.reset_filter(); + + // Root origin bypass all filter, even when they are reset. 
assert_eq!(origin.filter_call(&accepted_call), true); - assert_eq!(origin.filter_call(&rejected_call), false); + assert_eq!(origin.filter_call(&rejected_call), true); } } @@ -739,40 +751,66 @@ fn test_metadata() { fn pallet_in_runtime_is_correct() { assert_eq!(PalletInfo::index::().unwrap(), 30); assert_eq!(PalletInfo::name::().unwrap(), "System"); + assert_eq!(PalletInfo::module_name::().unwrap(), "system"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 31); assert_eq!(PalletInfo::name::().unwrap(), "Module1_1"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 32); assert_eq!(PalletInfo::name::().unwrap(), "Module2"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module2"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 33); assert_eq!(PalletInfo::name::().unwrap(), "Module1_2"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 34); assert_eq!(PalletInfo::name::().unwrap(), "NestedModule3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "nested::module3"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 35); assert_eq!(PalletInfo::name::().unwrap(), "Module3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "self::module3"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 6); assert_eq!(PalletInfo::name::().unwrap(), "Module1_3"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 
0)); assert_eq!(PalletInfo::index::().unwrap(), 3); assert_eq!(PalletInfo::name::().unwrap(), "Module1_4"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 4); assert_eq!(PalletInfo::name::().unwrap(), "Module1_5"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 1); assert_eq!(PalletInfo::name::().unwrap(), "Module1_6"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 2); assert_eq!(PalletInfo::name::().unwrap(), "Module1_7"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 12); assert_eq!(PalletInfo::name::().unwrap(), "Module1_8"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); assert_eq!(PalletInfo::index::().unwrap(), 13); assert_eq!(PalletInfo::name::().unwrap(), "Module1_9"); + assert_eq!(PalletInfo::module_name::().unwrap(), "module1"); + assert_eq!(PalletInfo::crate_version::().unwrap(), CrateVersion::new(3, 0, 0)); } diff --git a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr index 5bc831f58988b..3dc7fcda9f18a 100644 --- a/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr +++ b/frame/support/test/tests/construct_runtime_ui/no_std_genesis_config.stderr @@ -10,7 +10,7 @@ error: `Pallet` does not have the std feature enabled, this will cause the `test 22 | | } | |_^ | - = note: this 
error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `test_pallet::__substrate_genesis_config_check::is_std_enabled_for_genesis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/no_std_genesis_config.rs:19:11 @@ -30,7 +30,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 22 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -48,7 +48,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 22 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use frame_support_test::Pallet; @@ -70,7 +70,7 @@ error[E0412]: cannot find type `GenesisConfig` in crate `test_pallet` 22 | | } | |_^ not found in `test_pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this struct | 1 | use frame_system::GenesisConfig; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr index 8781fe0df201a..2629cf4101923 100644 --- 
a/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_call_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::call] defined, perhaps you should remove 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_call_check::is_call_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_call_part.rs:28:11 @@ -33,7 +33,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -51,7 +51,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr index fa837698aa642..af69b79ed1a64 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_event_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::event] defined, 
perhaps you should remov 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_event_check::is_event_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_event_part.rs:28:11 @@ -33,7 +33,7 @@ error[E0412]: cannot find type `Event` in module `pallet` 31 | | } | |_^ not found in `pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::Event; @@ -51,7 +51,7 @@ error[E0412]: cannot find type `Event` in module `pallet` 31 | | } | |_^ not found in `pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::Event; @@ -71,7 +71,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -89,7 +89,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the 
macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr index 699f66a414ed2..bfedb921bca44 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_genesis_config_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::genesis_config] defined, perhaps you sho 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_genesis_config_check::is_genesis_config_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_genesis_config_part.rs:28:17 @@ -33,7 +33,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -51,7 +51,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; @@ 
-75,7 +75,7 @@ error[E0412]: cannot find type `GenesisConfig` in module `pallet` 31 | | } | |_^ not found in `pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this struct | 1 | use frame_system::GenesisConfig; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr index 88ff9ee910937..50dde1108263b 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_inherent_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::inherent] defined, perhaps you should re 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_inherent_check::is_inherent_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_inherent_part.rs:28:11 @@ -33,7 +33,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -51,7 +51,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z 
macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr index 3b3aa75c1ea08..b5f3ec4d381bc 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_origin_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::origin] defined, perhaps you should remo 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_origin_check::is_origin_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_origin_part.rs:28:11 @@ -33,7 +33,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -51,7 +51,7 @@ error[E0412]: cannot find type `Origin` in module `pallet` 31 | | } | |_^ not found in `pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this type alias | 1 | use frame_system::Origin; @@ 
-69,7 +69,7 @@ error[E0412]: cannot find type `Origin` in module `pallet` 31 | | } | |_^ not found in `pallet` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::Origin; @@ -89,7 +89,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; diff --git a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr index ac12c56d5c279..12bdce67cf038 100644 --- a/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr +++ b/frame/support/test/tests/construct_runtime_ui/undefined_validate_unsigned_part.stderr @@ -13,7 +13,7 @@ error: `Pallet` does not have #[pallet::validate_unsigned] defined, perhaps you 31 | | } | |_- in this macro invocation | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `pallet::__substrate_validate_unsigned_check::is_validate_unsigned_part_defined` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0433]: failed to resolve: use of undeclared crate or module `system` --> $DIR/undefined_validate_unsigned_part.rs:28:11 @@ -33,7 +33,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates 
in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing this enum | 1 | use frame_system::RawOrigin; @@ -51,7 +51,7 @@ error[E0433]: failed to resolve: use of undeclared crate or module `system` 31 | | } | |_^ not found in `system` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `construct_runtime` (in Nightly builds, run with -Z macro-backtrace for more info) help: consider importing one of these items | 1 | use crate::pallet::Pallet; diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr index 3bf5f58b43a39..86c427d8080be 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_integrity_test.stderr @@ -10,7 +10,7 @@ error: `integrity_test` can only be passed once as input. 
7 | | } | |_^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0601]: `main` function not found in crate `$CRATE` --> $DIR/reserved_keyword_two_times_integrity_test.rs:1:1 diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs index ddde7c72c1cc5..18aaec12c5f39 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs @@ -9,3 +9,5 @@ frame_support::decl_module! { } } } + +fn main() {} diff --git a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr index 2911d7ded8a23..369be77b8d249 100644 --- a/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr +++ b/frame/support/test/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.stderr @@ -10,16 +10,4 @@ error: `on_initialize` can only be passed once as input. 11 | | } | |_^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) - -error[E0601]: `main` function not found in crate `$CRATE` - --> $DIR/reserved_keyword_two_times_on_initialize.rs:1:1 - | -1 | / frame_support::decl_module! { -2 | | pub struct Module for enum Call where origin: T::Origin, system=self { -3 | | fn on_initialize() -> Weight { -4 | | 0 -... 
| -10 | | } -11 | | } - | |_^ consider adding a `main` function to `$DIR/tests/decl_module_ui/reserved_keyword_two_times_on_initialize.rs` + = note: this error originates in the macro `$crate::decl_module` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/derive_no_bound_ui/clone.stderr b/frame/support/test/tests/derive_no_bound_ui/clone.stderr index 4b253ad12451b..050b576c8b9ed 100644 --- a/frame/support/test/tests/derive_no_bound_ui/clone.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/clone.stderr @@ -1,7 +1,11 @@ error[E0277]: the trait bound `::C: Clone` is not satisfied - --> $DIR/clone.rs:7:2 - | -7 | c: T::C, - | ^ the trait `Clone` is not implemented for `::C` - | - = note: required by `clone` + --> $DIR/clone.rs:7:2 + | +7 | c: T::C, + | ^ the trait `Clone` is not implemented for `::C` + | +note: required by `clone` + --> $DIR/clone.rs:121:5 + | +121 | fn clone(&self) -> Self; + | ^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/derive_no_bound_ui/default.stderr b/frame/support/test/tests/derive_no_bound_ui/default.stderr index d58b5e9185268..7608f877a3b56 100644 --- a/frame/support/test/tests/derive_no_bound_ui/default.stderr +++ b/frame/support/test/tests/derive_no_bound_ui/default.stderr @@ -1,7 +1,11 @@ error[E0277]: the trait bound `::C: std::default::Default` is not satisfied - --> $DIR/default.rs:7:2 - | -7 | c: T::C, - | ^ the trait `std::default::Default` is not implemented for `::C` - | - = note: required by `std::default::Default::default` + --> $DIR/default.rs:7:2 + | +7 | c: T::C, + | ^ the trait `std::default::Default` is not implemented for `::C` + | +note: required by `std::default::Default::default` + --> $DIR/default.rs:116:5 + | +116 | fn default() -> Self; + | ^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet.rs b/frame/support/test/tests/pallet.rs index 6a9a18ea48d4b..dc72be3ebdd49 100644 --- a/frame/support/test/tests/pallet.rs +++ 
b/frame/support/test/tests/pallet.rs @@ -146,6 +146,12 @@ pub mod pallet { fn some_extra_extra() -> T::AccountId { SomeType1.into() } + + /// Some doc + #[pallet::constant_name(SomeExtraRename)] + fn some_extra_rename() -> T::AccountId { + SomeType1.into() + } } #[pallet::pallet] @@ -329,6 +335,10 @@ pub mod pallet { pub type SomeCountedStorageMap = CountedStorageMap; + #[pallet::storage] + #[pallet::unbounded] + pub type Unbounded = StorageValue>; + #[pallet::genesis_config] #[derive(Default)] pub struct GenesisConfig { @@ -553,7 +563,7 @@ frame_support::construct_runtime!( NodeBlock = Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: frame_system::{Pallet, Call, Event}, + System: frame_system::{Call, Event}, Example: pallet::{Pallet, Call, Event, Config, Storage, Inherent, Origin, ValidateUnsigned}, Example2: pallet2::{Pallet, Call, Event, Config, Storage}, } @@ -917,6 +927,10 @@ fn storage_expand() { assert_eq!(unhashed::get::(&k), Some(2u32)); let k = [twox_128(b"Example"), twox_128(b"CounterForRenamedCountedMap")].concat(); assert_eq!(unhashed::get::(&k), Some(1u32)); + + pallet::Unbounded::::put(vec![1, 2]); + let k = [twox_128(b"Example"), twox_128(b"Unbounded")].concat(); + assert_eq!(unhashed::get::>(&k), Some(vec![1, 2])); }) } @@ -1000,52 +1014,6 @@ fn metadata() { use frame_support::metadata::*; let pallets = vec![ - PalletMetadata { - index: 0, - name: "System", - storage: None, - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![ - PalletConstantMetadata { - name: "BlockWeights", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - PalletConstantMetadata { - name: "BlockLength", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - PalletConstantMetadata { - name: "BlockHashCount", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - PalletConstantMetadata { - name: "DbWeight", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - PalletConstantMetadata { - name: 
"Version", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - PalletConstantMetadata { - name: "SS58Prefix", - ty: meta_type::(), - value: vec![], - docs: vec![], - }, - ], - error: Some(meta_type::>().into()), - }, PalletMetadata { index: 1, name: "Example", @@ -1216,6 +1184,13 @@ fn metadata() { default: vec![0, 0, 0, 0], docs: vec!["Counter for the related counted storage map"], }, + StorageEntryMetadata { + name: "Unbounded", + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(meta_type::>()), + default: vec![0], + docs: vec![], + }, ], }), calls: Some(meta_type::>().into()), @@ -1251,174 +1226,41 @@ fn metadata() { value: vec![0, 0, 0, 0, 0, 0, 0, 0], docs: vec![" Some doc"], }, + PalletConstantMetadata { + name: "SomeExtraRename", + ty: meta_type::(), + value: vec![0, 0, 0, 0, 0, 0, 0, 0], + docs: vec![" Some doc"], + }, ], error: Some(PalletErrorMetadata { ty: meta_type::>() }), }, PalletMetadata { - index: 1, - name: "Example", + index: 2, + name: "Example2", storage: Some(PalletStorageMetadata { - prefix: "Example", + prefix: "Example2", entries: vec![ StorageEntryMetadata { - name: "ValueWhereClause", + name: "SomeValue", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(meta_type::()), + ty: StorageEntryType::Plain(meta_type::>()), default: vec![0], docs: vec![], }, StorageEntryMetadata { - name: "Value", + name: "SomeCountedStorageMap", modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(meta_type::()), - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "Value2", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(meta_type::()), - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "Map", - modifier: StorageEntryModifier::Default, ty: StorageEntryType::Map { - key: meta_type::(), - value: meta_type::(), - hashers: vec![StorageHasher::Blake2_128Concat], - }, - default: vec![4, 0], - docs: vec![], - }, - 
StorageEntryMetadata { - name: "Map2", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - key: meta_type::(), - value: meta_type::(), hashers: vec![StorageHasher::Twox64Concat], - }, - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "DoubleMap", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - value: meta_type::(), - key: meta_type::<(u8, u16)>(), - hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat, - ], - }, - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "DoubleMap2", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - value: meta_type::(), - key: meta_type::<(u16, u32)>(), - hashers: vec![ - StorageHasher::Twox64Concat, - StorageHasher::Blake2_128Concat, - ], - }, - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "NMap", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { key: meta_type::(), - hashers: vec![StorageHasher::Blake2_128Concat], value: meta_type::(), }, default: vec![0], docs: vec![], }, StorageEntryMetadata { - name: "NMap2", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - key: meta_type::<(u16, u32)>(), - hashers: vec![ - StorageHasher::Twox64Concat, - StorageHasher::Blake2_128Concat, - ], - value: meta_type::(), - }, - default: vec![0], - docs: vec![], - }, - #[cfg(feature = "conditional-storage")] - StorageEntryMetadata { - name: "ConditionalValue", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(meta_type::()), - default: vec![0], - docs: vec![], - }, - #[cfg(feature = "conditional-storage")] - StorageEntryMetadata { - name: "ConditionalMap", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - key: meta_type::(), - value: meta_type::(), - hashers: vec![StorageHasher::Twox64Concat], - }, - default: vec![0], - docs: vec![], - }, - #[cfg(feature = "conditional-storage")] - 
StorageEntryMetadata { - name: "ConditionalDoubleMap", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - value: meta_type::(), - key: meta_type::<(u8, u16)>(), - hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat, - ], - }, - default: vec![0], - docs: vec![], - }, - #[cfg(feature = "conditional-storage")] - StorageEntryMetadata { - name: "ConditionalNMap", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - key: meta_type::<(u8, u16)>(), - hashers: vec![ - StorageHasher::Blake2_128Concat, - StorageHasher::Twox64Concat, - ], - value: meta_type::(), - }, - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "RenamedCountedMap", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hashers: vec![StorageHasher::Twox64Concat], - key: meta_type::(), - value: meta_type::(), - }, - default: vec![0], - docs: vec![], - }, - StorageEntryMetadata { - name: "CounterForRenamedCountedMap", + name: "CounterForSomeCountedStorageMap", modifier: StorageEntryModifier::Default, ty: StorageEntryType::Plain(meta_type::()), default: vec![0, 0, 0, 0], @@ -1426,55 +1268,6 @@ fn metadata() { }, ], }), - calls: Some(meta_type::>().into()), - event: Some(meta_type::>().into()), - constants: vec![ - PalletConstantMetadata { - name: "MyGetParam", - ty: meta_type::(), - value: vec![10, 0, 0, 0], - docs: vec![" Some comment", " Some comment"], - }, - PalletConstantMetadata { - name: "MyGetParam2", - ty: meta_type::(), - value: vec![11, 0, 0, 0], - docs: vec![" Some comment", " Some comment"], - }, - PalletConstantMetadata { - name: "MyGetParam3", - ty: meta_type::(), - value: vec![12, 0, 0, 0, 0, 0, 0, 0], - docs: vec![], - }, - PalletConstantMetadata { - name: "some_extra", - ty: meta_type::(), - value: vec![100, 0, 0, 0, 0, 0, 0, 0], - docs: vec![" Some doc", " Some doc"], - }, - PalletConstantMetadata { - name: "some_extra_extra", - ty: meta_type::(), - value: vec![0, 0, 0, 
0, 0, 0, 0, 0], - docs: vec![" Some doc"], - }, - ], - error: Some(PalletErrorMetadata { ty: meta_type::>() }), - }, - PalletMetadata { - index: 2, - name: "Example2", - storage: Some(PalletStorageMetadata { - prefix: "Example2", - entries: vec![StorageEntryMetadata { - name: "SomeValue", - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(meta_type::>()), - default: vec![0], - docs: vec![], - }], - }), calls: Some(meta_type::>().into()), event: Some(PalletEventMetadata { ty: meta_type::() }), constants: vec![], @@ -1504,7 +1297,7 @@ fn metadata() { _ => panic!("metadata has been bumped, test needs to be updated"), }; - pretty_assertions::assert_eq!(actual_metadata.pallets[1], expected_metadata.pallets[1]); + pretty_assertions::assert_eq!(actual_metadata.pallets, expected_metadata.pallets); } #[test] @@ -1645,6 +1438,13 @@ fn test_storage_info() { max_values: Some(1), max_size: Some(4), }, + StorageInfo { + pallet_name: b"Example".to_vec(), + storage_name: b"Unbounded".to_vec(), + prefix: prefix(b"Example", b"Unbounded").to_vec(), + max_values: Some(1), + max_size: None, + }, ], ); diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr index d1b040c16091f..3d1ea1adc9862 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound.stderr @@ -9,12 +9,16 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` = note: required for the cast to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied - --> $DIR/call_argument_invalid_bound.rs:20:36 - | -20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^ the trait `Clone` is not implemented for `::Bar` - | - = note: required by `clone` + --> $DIR/call_argument_invalid_bound.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: 
T::Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `Clone` is not implemented for `::Bar` + | +note: required by `clone` + --> $DIR/clone.rs:121:5 + | +121 | fn clone(&self) -> Self; + | ^^^^^^^^^^^^^^^^^^^^^^^^ error[E0369]: binary operation `==` cannot be applied to type `&::Bar` --> $DIR/call_argument_invalid_bound.rs:20:36 diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr index 84d4863672957..15c611f4918d0 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr +++ b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_2.stderr @@ -9,12 +9,16 @@ error[E0277]: `::Bar` doesn't implement `std::fmt::Debug` = note: required for the cast to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `::Bar: Clone` is not satisfied - --> $DIR/call_argument_invalid_bound_2.rs:20:36 - | -20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^ the trait `Clone` is not implemented for `::Bar` - | - = note: required by `clone` + --> $DIR/call_argument_invalid_bound_2.rs:20:36 + | +20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `Clone` is not implemented for `::Bar` + | +note: required by `clone` + --> $DIR/clone.rs:121:5 + | +121 | fn clone(&self) -> Self; + | ^^^^^^^^^^^^^^^^^^^^^^^^ error[E0369]: binary operation `==` cannot be applied to type `&::Bar` --> $DIR/call_argument_invalid_bound_2.rs:20:36 @@ -28,17 +32,28 @@ help: consider further restricting this bound | ^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `::Bar: WrapperTypeEncode` is not satisfied - --> $DIR/call_argument_invalid_bound_2.rs:20:36 + --> $DIR/call_argument_invalid_bound_2.rs:1:1 | -20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { - | ^^^ the trait `WrapperTypeEncode` is not implemented for `::Bar` +1 | 
#[frame_support::pallet] + | ^----------------------- + | | + | _in this procedural macro expansion + | | +2 | | mod pallet { +3 | | use frame_support::pallet_prelude::{Hooks, DispatchResultWithPostInfo}; +4 | | use frame_system::pallet_prelude::{BlockNumberFor, OriginFor}; +... | +16 | | +17 | | #[pallet::call] + | |__________________^ the trait `WrapperTypeEncode` is not implemented for `::Bar` | - ::: $CARGO/parity-scale-codec-2.2.0/src/codec.rs + ::: $CARGO/parity-scale-codec-2.3.1/src/codec.rs | - | fn encode_to(&self, dest: &mut T) { - | ------ required by this bound in `encode_to` + | fn encode_to(&self, dest: &mut T) { + | ------ required by this bound in `encode_to` | = note: required because of the requirements on the impl of `Encode` for `::Bar` + = note: this error originates in the derive macro `frame_support::codec::Encode` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is not satisfied --> $DIR/call_argument_invalid_bound_2.rs:20:36 @@ -46,9 +61,9 @@ error[E0277]: the trait bound `::Bar: WrapperTypeDecode` is 20 | pub fn foo(origin: OriginFor, bar: T::Bar) -> DispatchResultWithPostInfo { | ^^^ the trait `WrapperTypeDecode` is not implemented for `::Bar` | - ::: $CARGO/parity-scale-codec-2.2.0/src/codec.rs + ::: $CARGO/parity-scale-codec-2.3.1/src/codec.rs | | fn decode(input: &mut I) -> Result; - | ----- required by this bound in `pallet::_::_parity_scale_codec::Decode::decode` + | ----- required by this bound in `parity_scale_codec::Decode::decode` | = note: required because of the requirements on the impl of `Decode` for `::Bar` diff --git a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr index 73513907e85f3..144b7e12bd664 100644 --- a/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr +++ 
b/frame/support/test/tests/pallet_ui/call_argument_invalid_bound_3.stderr @@ -5,17 +5,21 @@ error[E0277]: `Bar` doesn't implement `std::fmt::Debug` | ^^^ `Bar` cannot be formatted using `{:?}` | = help: the trait `std::fmt::Debug` is not implemented for `Bar` - = note: add `#[derive(Debug)]` or manually implement `std::fmt::Debug` + = note: add `#[derive(Debug)]` to `Bar` or manually `impl std::fmt::Debug for Bar` = note: required because of the requirements on the impl of `std::fmt::Debug` for `&Bar` = note: required for the cast to the object type `dyn std::fmt::Debug` error[E0277]: the trait bound `Bar: Clone` is not satisfied - --> $DIR/call_argument_invalid_bound_3.rs:22:36 - | -22 | pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { - | ^^^ the trait `Clone` is not implemented for `Bar` - | - = note: required by `clone` + --> $DIR/call_argument_invalid_bound_3.rs:22:36 + | +22 | pub fn foo(origin: OriginFor, bar: Bar) -> DispatchResultWithPostInfo { + | ^^^ the trait `Clone` is not implemented for `Bar` + | +note: required by `clone` + --> $DIR/clone.rs:121:5 + | +121 | fn clone(&self) -> Self; + | ^^^^^^^^^^^^^^^^^^^^^^^^ error[E0369]: binary operation `==` cannot be applied to type `&Bar` --> $DIR/call_argument_invalid_bound_3.rs:22:36 diff --git a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr index d48012a6c952d..bf4c05bb4e5b5 100644 --- a/frame/support/test/tests/pallet_ui/event_field_not_member.stderr +++ b/frame/support/test/tests/pallet_ui/event_field_not_member.stderr @@ -1,10 +1,14 @@ error[E0277]: the trait bound `::Bar: Clone` is not satisfied - --> $DIR/event_field_not_member.rs:23:7 - | -23 | B { b: T::Bar }, - | ^ the trait `Clone` is not implemented for `::Bar` - | - = note: required by `clone` + --> $DIR/event_field_not_member.rs:23:7 + | +23 | B { b: T::Bar }, + | ^ the trait `Clone` is not implemented for `::Bar` + | +note: required by 
`clone` + --> $DIR/clone.rs:121:5 + | +121 | fn clone(&self) -> Self; + | ^^^^^^^^^^^^^^^^^^^^^^^^ error[E0369]: binary operation `==` cannot be applied to type `&::Bar` --> $DIR/event_field_not_member.rs:23:7 diff --git a/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr index dd96c700ce7e5..e3126ad6a85dc 100644 --- a/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr +++ b/frame/support/test/tests/pallet_ui/event_not_in_trait.stderr @@ -4,4 +4,4 @@ error: Invalid usage of Event, `Config` contains no associated type `Event`, but 1 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr index 4bc3cfdcbf9b7..ad8300b8d89b8 100644 --- a/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr +++ b/frame/support/test/tests/pallet_ui/genesis_default_not_satisfied.stderr @@ -6,5 +6,5 @@ error[E0277]: the trait bound `pallet::GenesisConfig: std::default::Default` is | ::: $WORKSPACE/frame/support/src/traits/hooks.rs | - | pub trait GenesisBuild: Default + MaybeSerializeDeserialize { + | pub trait GenesisBuild: Default + sp_runtime::traits::MaybeSerializeDeserialize { | ------- required by this bound in `GenesisBuild` diff --git a/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr index f451f7b16aee5..f57b4a61c80c5 100644 --- a/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr +++ b/frame/support/test/tests/pallet_ui/genesis_invalid_generic.stderr @@ -10,4 +10,4 @@ 
error: expected `<` 1 | #[frame_support::pallet] | ^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in an attribute macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the attribute macro `frame_support::pallet` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr index 3d7303fafdcf5..ecb57bec37a7b 100644 --- a/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr +++ b/frame/support/test/tests/pallet_ui/hooks_invalid_item.stderr @@ -2,14 +2,14 @@ error[E0107]: missing generics for trait `Hooks` --> $DIR/hooks_invalid_item.rs:12:18 | 12 | impl Hooks for Pallet {} - | ^^^^^ expected 1 type argument + | ^^^^^ expected 1 generic argument | -note: trait defined here, with 1 type parameter: `BlockNumber` +note: trait defined here, with 1 generic parameter: `BlockNumber` --> $DIR/hooks.rs:214:11 | 214 | pub trait Hooks { | ^^^^^ ----------- -help: use angle brackets to add missing type argument +help: add missing generic argument | 12 | impl Hooks for Pallet {} - | ^^^^^^^^^^^^^ + | ^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr index 239de4dba949b..cd3032c49735a 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen.stderr @@ -1,77 +1,105 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 - | -20 | #[pallet::storage] - | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` - = note: required because of the requirements on 
the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `build_metadata` + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required 
because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `build_metadata` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `build_metadata` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = note: required because of the requirements on the impl of `Encode` for 
`Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `build_metadata` - -error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `Decode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` - -error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` - -error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `Encode` for `Bar` - = note: required because of the requirements on the impl of 
`FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr index a5bf32a0ef2d2..3d03af836986a 100644 --- a/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr +++ b/frame/support/test/tests/pallet_ui/storage_ensure_span_are_ok_on_wrong_gen_unnamed.stderr @@ -1,77 +1,105 @@ error[E0277]: the trait bound `Bar: TypeInfo` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 - | -20 | #[pallet::storage] - | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `build_metadata` + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `TypeInfo` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `StaticTypeInfo` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Decode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: 
the trait bound `Bar: WrapperTypeEncode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + | +20 | #[pallet::storage] + | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` + | + = note: required because of the requirements on the impl of `Encode` for `Bar` + = note: required because of the requirements on the impl of `FullEncode` for `Bar` + = note: required because of the requirements on the impl of `FullCodec` for `Bar` + = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `build_metadata` + --> $DIR/mod.rs:113:2 + | +113 | fn build_metadata(doc: Vec<&'static str>, entries: &mut Vec); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` | = note: required because of the requirements on the impl of `Decode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `build_metadata` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - 
--> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait `EncodeLike` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` | = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `build_metadata` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:20:12 + --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 | -20 | #[pallet::storage] - | ^^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` +9 | #[pallet::pallet] + | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` | = note: required because of the requirements on the impl of `Encode` for `Bar` = note: required because of the requirements on the impl of `FullEncode` for `Bar` = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `StorageEntryMetadataBuilder` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `build_metadata` - -error[E0277]: the trait bound `Bar: WrapperTypeDecode` is not satisfied - --> 
$DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeDecode` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `Decode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` - -error[E0277]: the trait bound `Bar: EncodeLike` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `EncodeLike` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` - -error[E0277]: the trait bound `Bar: WrapperTypeEncode` is not satisfied - --> $DIR/storage_ensure_span_are_ok_on_wrong_gen_unnamed.rs:9:12 - | -9 | #[pallet::pallet] - | ^^^^^^ the trait `WrapperTypeEncode` is not implemented for `Bar` - | - = note: required because of the requirements on the impl of `Encode` for `Bar` - = note: required because of the requirements on the impl of `FullEncode` for `Bar` - = note: required because of the requirements on the impl of `FullCodec` for `Bar` - = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `partial_storage_info` + = note: required because of the requirements on the impl of `PartialStorageInfoTrait` for 
`frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` +note: required by `partial_storage_info` + --> $DIR/storage.rs:88:2 + | +88 | fn partial_storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr index ad415911bc933..0ffb015e36bca 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied.stderr @@ -5,4 +5,8 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied | ^^^^^^^^^^^^^^^^^^^^^ the trait `MaxEncodedLen` is not implemented for `Bar` | = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageValue<_GeneratedPrefixForStorageFoo, Bar>` - = note: required by `storage_info` +note: required by `storage_info` + --> $DIR/storage.rs:71:2 + | +71 | fn storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr index 6c92423c6a7fe..2b70102fdac24 100644 --- a/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr +++ b/frame/support/test/tests/pallet_ui/storage_info_unsatisfied_nmap.stderr @@ -6,4 +6,8 @@ error[E0277]: the trait bound `Bar: MaxEncodedLen` is not satisfied | = note: required because of the requirements on the impl of `KeyGeneratorMaxEncodedLen` for `Key` = note: required because of the requirements on the impl of `StorageInfoTrait` for `frame_support::pallet_prelude::StorageNMap<_GeneratedPrefixForStorageFoo, Key, u32>` - = note: required by `storage_info` +note: required by `storage_info` + --> $DIR/storage.rs:71:2 + | +71 | fn storage_info() -> Vec; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git 
a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr index bf93d99cf56bd..6313bd691f943 100644 --- a/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr +++ b/frame/support/test/tests/pallet_ui/storage_invalid_attribute.stderr @@ -1,4 +1,4 @@ -error: expected `getter` or `storage_prefix` +error: expected one of: `getter`, `storage_prefix`, `unbounded` --> $DIR/storage_invalid_attribute.rs:16:12 | 16 | #[pallet::generate_store(pub trait Store)] diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr b/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr index 188eed3cb0d17..40f57f16e0df5 100644 --- a/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr +++ b/frame/support/test/tests/pallet_ui/storage_multiple_getters.stderr @@ -1,4 +1,4 @@ -error: Invalid pallet::storage, multiple argument pallet::getter found +error: Invalid attribute: Duplicate attribute --> $DIR/storage_multiple_getters.rs:20:3 | 20 | #[pallet::getter(fn foo_error)] diff --git a/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr b/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr index 9288d131d95af..52cb7e85adf21 100644 --- a/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr +++ b/frame/support/test/tests/pallet_ui/storage_multiple_renames.stderr @@ -1,4 +1,4 @@ -error: Invalid pallet::storage, multiple argument pallet::storage_prefix found +error: Invalid attribute: Duplicate attribute --> $DIR/storage_multiple_renames.rs:20:3 | 20 | #[pallet::storage_prefix = "Baz"] diff --git a/frame/support/test/tests/reserved_keyword/on_initialize.stderr b/frame/support/test/tests/reserved_keyword/on_initialize.stderr index 3df392dee9005..84e93fa52c2d9 100644 --- a/frame/support/test/tests/reserved_keyword/on_initialize.stderr +++ b/frame/support/test/tests/reserved_keyword/on_initialize.stderr @@ -4,7 
+4,7 @@ error: Invalid call fn name: `on_finalize`, name is reserved and doesn't match e 28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::__check_reserved_fn_name` (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `on_initialize`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. --> $DIR/on_initialize.rs:28:1 @@ -12,7 +12,7 @@ error: Invalid call fn name: `on_initialize`, name is reserved and doesn't match 28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::__check_reserved_fn_name` (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `on_runtime_upgrade`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. 
--> $DIR/on_initialize.rs:28:1 @@ -20,7 +20,7 @@ error: Invalid call fn name: `on_runtime_upgrade`, name is reserved and doesn't 28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::__check_reserved_fn_name` (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `offchain_worker`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. --> $DIR/on_initialize.rs:28:1 @@ -28,7 +28,7 @@ error: Invalid call fn name: `offchain_worker`, name is reserved and doesn't mat 28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::__check_reserved_fn_name` (in Nightly builds, run with -Z macro-backtrace for more info) error: Invalid call fn name: `deposit_event`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. 
--> $DIR/on_initialize.rs:28:1 @@ -36,4 +36,4 @@ error: Invalid call fn name: `deposit_event`, name is reserved and doesn't match 28 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `$crate::__check_reserved_fn_name` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index 389730107b439..dc69bd2d5e85f 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME system module" readme = "README.md" diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 97c19c5e8159a..c8a9d4eadfea0 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -16,8 +16,6 @@ // limitations under the License. use criterion::{black_box, criterion_group, criterion_main, Criterion}; -use frame_support::{decl_event, decl_module}; -use frame_system as system; use sp_core::H256; use sp_runtime::{ testing::Header, @@ -25,24 +23,24 @@ use sp_runtime::{ Perbill, }; +#[frame_support::pallet] mod module { - use super::*; + use frame_support::pallet_prelude::*; - pub trait Config: system::Config { - type Event: From + Into<::Event>; - } + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); - decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { - pub fn deposit_event() = default; - } + #[pallet::config] + pub trait Config: frame_system::Config { + type Event: From + IsType<::Event>; } - decl_event!( - pub enum Event { - Complex(Vec, u32, u16, u128), - } - ); + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + Complex(Vec, u32, u16, u128), + } } type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; @@ -55,7 +53,7 @@ frame_support::construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Pallet, Call, Config, Storage, Event}, - Module: module::{Pallet, Call, Event}, + Module: module::{Pallet, Event}, } ); @@ -70,7 +68,7 @@ frame_support::parameter_types! { 4 * 1024 * 1024, Perbill::from_percent(75), ); } -impl system::Config for Runtime { +impl frame_system::Config for Runtime { type BaseCallFilter = frame_support::traits::Everything; type BlockWeights = (); type BlockLength = BlockLength; @@ -101,14 +99,17 @@ impl module::Config for Runtime { } fn new_test_ext() -> sp_io::TestExternalities { - system::GenesisConfig::default().build_storage::().unwrap().into() + frame_system::GenesisConfig::default() + .build_storage::() + .unwrap() + .into() } fn deposit_events(n: usize) { let mut t = new_test_ext(); t.execute_with(|| { for _ in 0..n { - module::Module::::deposit_event(module::Event::Complex( + module::Pallet::::deposit_event(module::Event::Complex( vec![1, 2, 3], 2, 3, diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index 29bcccfd7d830..6aa2251f287d9 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME System 
benchmarking" readme = "README.md" diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index beb61829bce37..e7371b1099e5e 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -20,7 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::Encode; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::{storage, traits::Get, weights::DispatchClass}; use frame_system::{Call, DigestItemOf, Pallet as System, RawOrigin}; use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; @@ -140,6 +140,6 @@ benchmarks! { verify { assert_eq!(storage::unhashed::get_raw(&last_key), None); } -} -impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/system/rpc/runtime-api/Cargo.toml b/frame/system/rpc/runtime-api/Cargo.toml index fce29612b4d8c..b5e569e1298ca 100644 --- a/frame/system/rpc/runtime-api/Cargo.toml +++ b/frame/system/rpc/runtime-api/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Runtime API definition required by System RPC extensions." readme = "README.md" diff --git a/frame/system/src/extensions/check_genesis.rs b/frame/system/src/extensions/check_genesis.rs index 6f409d5d3d4ad..9c5c890ee6098 100644 --- a/frame/system/src/extensions/check_genesis.rs +++ b/frame/system/src/extensions/check_genesis.rs @@ -24,6 +24,11 @@ use sp_runtime::{ }; /// Genesis hash check to provide replay protection between different networks. 
+/// +/// # Transaction Validity +/// +/// Note that while a transaction with invalid `genesis_hash` will fail to be decoded, +/// the extension does not affect any other fields of `TransactionValidity` directly. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct CheckGenesis(sp_std::marker::PhantomData); diff --git a/frame/system/src/extensions/check_mortality.rs b/frame/system/src/extensions/check_mortality.rs index 69cca765efea9..941f28dc6fc63 100644 --- a/frame/system/src/extensions/check_mortality.rs +++ b/frame/system/src/extensions/check_mortality.rs @@ -27,6 +27,10 @@ use sp_runtime::{ }; /// Check for transaction mortality. +/// +/// # Transaction Validity +/// +/// The extension affects `longevity` of the transaction according to the [`Era`] definition. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct CheckMortality(Era, sp_std::marker::PhantomData); diff --git a/frame/system/src/extensions/check_nonce.rs b/frame/system/src/extensions/check_nonce.rs index 74be83398421e..3c6f9a1b4dbd1 100644 --- a/frame/system/src/extensions/check_nonce.rs +++ b/frame/system/src/extensions/check_nonce.rs @@ -30,8 +30,11 @@ use sp_std::vec; /// Nonce check and increment to give replay protection for transactions. /// -/// Note that this does not set any priority by default. Make sure that AT LEAST one of the signed -/// extension sets some kind of priority upon validating transactions. +/// # Transaction Validity +/// +/// This extension affects `requires` and `provides` tags of validity, but DOES NOT +/// set the `priority` field. Make sure that AT LEAST one of the signed extension sets +/// some kind of priority upon validating transactions. 
#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct CheckNonce(#[codec(compact)] pub T::Index); diff --git a/frame/system/src/extensions/check_spec_version.rs b/frame/system/src/extensions/check_spec_version.rs index 0217aefae6b9d..688abe99763a2 100644 --- a/frame/system/src/extensions/check_spec_version.rs +++ b/frame/system/src/extensions/check_spec_version.rs @@ -21,6 +21,11 @@ use scale_info::TypeInfo; use sp_runtime::{traits::SignedExtension, transaction_validity::TransactionValidityError}; /// Ensure the runtime version registered in the transaction is the same as at present. +/// +/// # Transaction Validity +/// +/// The transaction with incorrect `spec_version` are considered invalid. The validity +/// is not affected in any other way. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct CheckSpecVersion(sp_std::marker::PhantomData); diff --git a/frame/system/src/extensions/check_tx_version.rs b/frame/system/src/extensions/check_tx_version.rs index 9418d3ff5d937..f6bb53e1cba34 100644 --- a/frame/system/src/extensions/check_tx_version.rs +++ b/frame/system/src/extensions/check_tx_version.rs @@ -21,6 +21,11 @@ use scale_info::TypeInfo; use sp_runtime::{traits::SignedExtension, transaction_validity::TransactionValidityError}; /// Ensure the transaction version registered in the transaction is the same as at present. +/// +/// # Transaction Validity +/// +/// The transaction with incorrect `transaction_version` are considered invalid. The validity +/// is not affected in any other way. 
#[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct CheckTxVersion(sp_std::marker::PhantomData); diff --git a/frame/system/src/extensions/check_weight.rs b/frame/system/src/extensions/check_weight.rs index 92dc7382fa2d5..ca885accd660f 100644 --- a/frame/system/src/extensions/check_weight.rs +++ b/frame/system/src/extensions/check_weight.rs @@ -19,19 +19,21 @@ use crate::{limits::BlockWeights, Config, Pallet}; use codec::{Decode, Encode}; use frame_support::{ traits::Get, - weights::{priority::FrameTransactionPriority, DispatchClass, DispatchInfo, PostDispatchInfo}, + weights::{DispatchClass, DispatchInfo, PostDispatchInfo}, }; use scale_info::TypeInfo; use sp_runtime::{ traits::{DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SignedExtension}, - transaction_validity::{ - InvalidTransaction, TransactionPriority, TransactionValidity, TransactionValidityError, - ValidTransaction, - }, + transaction_validity::{InvalidTransaction, TransactionValidity, TransactionValidityError}, DispatchResult, }; /// Block resource (weight) limit check. +/// +/// # Transaction Validity +/// +/// This extension does not influence any fields of `TransactionValidity` in case the +/// transaction is valid. #[derive(Encode, Decode, Clone, Eq, PartialEq, Default, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct CheckWeight(sp_std::marker::PhantomData); @@ -81,23 +83,6 @@ where } } - /// Get the priority of an extrinsic denoted by `info`. - /// - /// Operational transaction will be given a fixed initial amount to be fairly distinguished from - /// the normal ones. - fn get_priority(info: &DispatchInfoOf) -> TransactionPriority { - match info.class { - // Normal transaction. - DispatchClass::Normal => FrameTransactionPriority::Normal(info.weight.into()).into(), - // Don't use up the whole priority space, to allow things like `tip` to be taken into - // account as well. 
- DispatchClass::Operational => - FrameTransactionPriority::Operational(info.weight.into()).into(), - // Mandatory extrinsics are only for inherents; never transactions. - DispatchClass::Mandatory => TransactionPriority::min_value(), - } - } - /// Creates new `SignedExtension` to check weight of the extrinsic. pub fn new() -> Self { Self(Default::default()) @@ -130,7 +115,7 @@ where // consumption from causing false negatives. Self::check_extrinsic_weight(info)?; - Ok(ValidTransaction { priority: Self::get_priority(info), ..Default::default() }) + Ok(Default::default()) } } @@ -368,13 +353,7 @@ mod tests { }; let len = 0_usize; - assert_eq!( - CheckWeight::::do_validate(&okay, len), - Ok(ValidTransaction { - priority: CheckWeight::::get_priority(&okay), - ..Default::default() - }) - ); + assert_eq!(CheckWeight::::do_validate(&okay, len), Ok(Default::default())); assert_err!( CheckWeight::::do_validate(&max, len), InvalidTransaction::ExhaustsResources @@ -506,30 +485,6 @@ mod tests { }) } - #[test] - fn signed_ext_check_weight_works() { - new_test_ext().execute_with(|| { - let normal = - DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; - let op = DispatchInfo { - weight: 100, - class: DispatchClass::Operational, - pays_fee: Pays::Yes, - }; - let len = 0_usize; - - let priority = CheckWeight::(PhantomData) - .validate(&1, CALL, &normal, len) - .unwrap() - .priority; - assert_eq!(priority, 100); - - let priority = - CheckWeight::(PhantomData).validate(&1, CALL, &op, len).unwrap().priority; - assert_eq!(priority, frame_support::weights::priority::LIMIT + 100); - }) - } - #[test] fn signed_ext_check_weight_block_size_works() { new_test_ext().execute_with(|| { diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index 1c95c4782b5c4..cd636ec6c23b5 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = 
"Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME Timestamp Module" documentation = "https://docs.rs/pallet-timestamp" diff --git a/frame/timestamp/src/benchmarking.rs b/frame/timestamp/src/benchmarking.rs index 97ddd4cddd63f..98e05439df72b 100644 --- a/frame/timestamp/src/benchmarking.rs +++ b/frame/timestamp/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, TrackedStorageKey}; +use frame_benchmarking::{benchmarks, TrackedStorageKey}; use frame_support::{ensure, traits::OnFinalize}; use frame_system::RawOrigin; @@ -55,6 +55,6 @@ benchmarks! { verify { ensure!(!DidUpdate::::exists(), "Time was not removed."); } -} -impl_benchmark_test_suite!(Timestamp, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Timestamp, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/tips/Cargo.toml b/frame/tips/Cargo.toml index 8ca395e1c5416..a3f268169b784 100644 --- a/frame/tips/Cargo.toml +++ b/frame/tips/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet to manage tips" readme = "README.md" diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs index 5e08121855210..d8227332bb334 100644 --- a/frame/tips/src/benchmarking.rs +++ b/frame/tips/src/benchmarking.rs @@ -19,7 +19,7 @@ #![cfg(feature = "runtime-benchmarks")] -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::ensure; use frame_system::RawOrigin; use sp_runtime::traits::Saturating; @@ 
-190,6 +190,6 @@ benchmarks! { let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); ensure!(Tips::::contains_key(hash), "tip does not exist"); }: _(RawOrigin::Root, hash) -} -impl_benchmark_test_suite!(TipsMod, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(TipsMod, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 546939692bbaf..bea263cbef6f1 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet to manage transaction payments" readme = "README.md" @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "2.0.0", default-features = ] } scale-info = { version = "1.0", default-features = false, features = ["derive"] } serde = { version = "1.0.126", optional = true } -smallvec = "1.4.1" +smallvec = "1.7.0" sp-core = { version = "4.0.0-dev", path = "../../primitives/core", default-features = false } sp-io = { version = "4.0.0-dev", path = "../../primitives/io", default-features = false } diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 3858c41a38763..1a2c68227c11f 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "RPC interface for the transaction payment pallet." 
readme = "README.md" diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 2f78f2439c604..7bdca2f658293 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "RPC runtime API for transaction payment FRAME pallet" readme = "README.md" diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index e3a3bccc3d39a..28200bee7054f 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -264,6 +264,30 @@ pub mod pallet { #[pallet::constant] type TransactionByteFee: Get>; + /// A fee mulitplier for `Operational` extrinsics to compute "virtual tip" to boost their + /// `priority` + /// + /// This value is multipled by the `final_fee` to obtain a "virtual tip" that is later + /// added to a tip component in regular `priority` calculations. + /// It means that a `Normal` transaction can front-run a similarly-sized `Operational` + /// extrinsic (with no tip), by including a tip value greater than the virtual tip. + /// + /// ```rust,ignore + /// // For `Normal` + /// let priority = priority_calc(tip); + /// + /// // For `Operational` + /// let virtual_tip = (inclusion_fee + tip) * OperationalFeeMultiplier; + /// let priority = priority_calc(tip + virtual_tip); + /// ``` + /// + /// Note that since we use `final_fee` the multiplier applies also to the regular `tip` + /// sent with the transaction. So, not only does the transaction get a priority bump based + /// on the `inclusion_fee`, but we also amplify the impact of tips applied to `Operational` + /// transactions. 
+ #[pallet::constant] + type OperationalFeeMultiplier: Get; + /// Convert a weight value into a deductible fee based on the currency type. type WeightToFee: WeightToFeePolynomial>; @@ -332,12 +356,7 @@ pub mod pallet { .unwrap(), ); - // This is the minimum value of the multiplier. Make sure that if we collapse to this - // value, we can recover with a reasonable amount of traffic. For this test we assert - // that if we collapse to minimum, the trend will be positive with a weight value - // which is 1% more than the target. - let min_value = T::FeeMultiplierUpdate::min(); - let mut target = T::FeeMultiplierUpdate::target() * + let target = T::FeeMultiplierUpdate::target() * T::BlockWeights::get().get(DispatchClass::Normal).max_total.expect( "Setting `max_total` for `Normal` dispatch class is not compatible with \ `transaction-payment` pallet.", @@ -348,10 +367,17 @@ pub mod pallet { // this is most likely because in a test setup we set everything to (). return } - target += addition; #[cfg(any(feature = "std", test))] sp_io::TestExternalities::new_empty().execute_with(|| { + // This is the minimum value of the multiplier. Make sure that if we collapse to + // this value, we can recover with a reasonable amount of traffic. For this test we + // assert that if we collapse to minimum, the trend will be positive with a weight + // value which is 1% more than the target. + let min_value = T::FeeMultiplierUpdate::min(); + + let target = target + addition; + >::set_block_consumed_resources(target, 0); let next = T::FeeMultiplierUpdate::convert(min_value); assert!( @@ -523,6 +549,14 @@ where /// Require the transactor pay for themselves and maybe include a tip to gain additional priority /// in the queue. +/// +/// # Transaction Validity +/// +/// This extension sets the `priority` field of `TransactionValidity` depending on the amount +/// of tip being paid per weight unit. 
+/// +/// Operational transactions will receive an additional priority bump, so that they are normally +/// considered before regular transactions. #[derive(Encode, Decode, Clone, Eq, PartialEq, TypeInfo)] #[scale_info(skip_type_params(T))] pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); @@ -564,27 +598,73 @@ where .map(|i| (fee, i)) } - /// Get an appropriate priority for a transaction with the given length and info. + /// Get an appropriate priority for a transaction with the given `DispatchInfo`, encoded length + /// and user-included tip. /// - /// This will try and optimise the `fee/weight` `fee/length`, whichever is consuming more of the - /// maximum corresponding limit. + /// The priority is based on the amount of `tip` the user is willing to pay per unit of either + /// `weight` or `length`, depending which one is more limitting. For `Operational` extrinsics + /// we add a "virtual tip" to the calculations. /// - /// For example, if a transaction consumed 1/4th of the block length and half of the weight, its - /// final priority is `fee * min(2, 4) = fee * 2`. If it consumed `1/4th` of the block length - /// and the entire block weight `(1/1)`, its priority is `fee * min(1, 4) = fee * 1`. This means - /// that the transaction which consumes more resources (either length or weight) with the same - /// `fee` ends up having lower priority. - fn get_priority( - len: usize, + /// The formula should simply be `tip / bounded_{weight|length}`, but since we are using + /// integer division, we have no guarantees it's going to give results in any reasonable + /// range (might simply end up being zero). Hence we use a scaling factor: + /// `tip * (max_block_{weight|length} / bounded_{weight|length})`, since given current + /// state of-the-art blockchains, number of per-block transactions is expected to be in a + /// range reasonable enough to not saturate the `Balance` type while multiplying by the tip. 
+ pub fn get_priority( info: &DispatchInfoOf, + len: usize, + tip: BalanceOf, final_fee: BalanceOf, ) -> TransactionPriority { - let weight_saturation = T::BlockWeights::get().max_block / info.weight.max(1); - let max_block_length = *T::BlockLength::get().max.get(DispatchClass::Normal); - let len_saturation = max_block_length as u64 / (len as u64).max(1); - let coefficient: BalanceOf = - weight_saturation.min(len_saturation).saturated_into::>(); - final_fee.saturating_mul(coefficient).saturated_into::() + // Calculate how many such extrinsics we could fit into an empty block and take + // the limitting factor. + let max_block_weight = T::BlockWeights::get().max_block; + let max_block_length = *T::BlockLength::get().max.get(info.class) as u64; + + let bounded_weight = info.weight.max(1).min(max_block_weight); + let bounded_length = (len as u64).max(1).min(max_block_length); + + let max_tx_per_block_weight = max_block_weight / bounded_weight; + let max_tx_per_block_length = max_block_length / bounded_length; + // Given our current knowledge this value is going to be in a reasonable range - i.e. + // less than 10^9 (2^30), so multiplying by the `tip` value is unlikely to overflow the + // balance type. We still use saturating ops obviously, but the point is to end up with some + // `priority` distribution instead of having all transactions saturate the priority. + let max_tx_per_block = max_tx_per_block_length + .min(max_tx_per_block_weight) + .saturated_into::>(); + let max_reward = |val: BalanceOf| val.saturating_mul(max_tx_per_block); + + // To distribute no-tip transactions a little bit, we set the minimal tip as `1`. + // This means that given two transactions without a tip, smaller one will be preferred. + let tip = tip.max(1.saturated_into()); + let scaled_tip = max_reward(tip); + + match info.class { + DispatchClass::Normal => { + // For normal class we simply take the `tip_per_weight`. 
+ scaled_tip + }, + DispatchClass::Mandatory => { + // Mandatory extrinsics should be prohibited (e.g. by the [`CheckWeight`] + // extensions), but just to be safe let's return the same priority as `Normal` here. + scaled_tip + }, + DispatchClass::Operational => { + // A "virtual tip" value added to an `Operational` extrinsic. + // This value should be kept high enough to allow `Operational` extrinsics + // to get in even during congestion period, but at the same time low + // enough to prevent a possible spam attack by sending invalid operational + // extrinsics which push away regular transactions from the pool. + let fee_multiplier = T::OperationalFeeMultiplier::get().saturated_into(); + let virtual_tip = final_fee.saturating_mul(fee_multiplier); + let scaled_virtual_tip = max_reward(virtual_tip); + + scaled_tip.saturating_add(scaled_virtual_tip) + }, + } + .saturated_into::() } } @@ -627,8 +707,12 @@ where info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { - let (fee, _) = self.withdraw_fee(who, call, info, len)?; - Ok(ValidTransaction { priority: Self::get_priority(len, info, fee), ..Default::default() }) + let (final_fee, _) = self.withdraw_fee(who, call, info, len)?; + let tip = self.0; + Ok(ValidTransaction { + priority: Self::get_priority(info, len, tip, final_fee), + ..Default::default() + }) } fn pre_dispatch( @@ -741,6 +825,7 @@ mod tests { pub const BlockHashCount: u64 = 250; pub static TransactionByteFee: u64 = 1; pub static WeightToFee: u64 = 1; + pub static OperationalFeeMultiplier: u8 = 5; } impl frame_system::Config for Runtime { @@ -820,6 +905,7 @@ mod tests { impl Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; + type OperationalFeeMultiplier = OperationalFeeMultiplier; type WeightToFee = WeightToFee; type FeeMultiplierUpdate = (); } @@ -1333,6 +1419,79 @@ mod tests { }); } + #[test] + fn should_alter_operational_priority() { + let tip = 5; + let len = 10; + + 
ExtBuilder::default().balance_factor(100).build().execute_with(|| { + let normal = + DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let priority = ChargeTransactionPayment::(tip) + .validate(&2, CALL, &normal, len) + .unwrap() + .priority; + + assert_eq!(priority, 50); + + let priority = ChargeTransactionPayment::(2 * tip) + .validate(&2, CALL, &normal, len) + .unwrap() + .priority; + + assert_eq!(priority, 100); + }); + + ExtBuilder::default().balance_factor(100).build().execute_with(|| { + let op = DispatchInfo { + weight: 100, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let priority = ChargeTransactionPayment::(tip) + .validate(&2, CALL, &op, len) + .unwrap() + .priority; + assert_eq!(priority, 5800); + + let priority = ChargeTransactionPayment::(2 * tip) + .validate(&2, CALL, &op, len) + .unwrap() + .priority; + assert_eq!(priority, 6100); + }); + } + + #[test] + fn no_tip_has_some_priority() { + let tip = 0; + let len = 10; + + ExtBuilder::default().balance_factor(100).build().execute_with(|| { + let normal = + DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: Pays::Yes }; + let priority = ChargeTransactionPayment::(tip) + .validate(&2, CALL, &normal, len) + .unwrap() + .priority; + + assert_eq!(priority, 10); + }); + + ExtBuilder::default().balance_factor(100).build().execute_with(|| { + let op = DispatchInfo { + weight: 100, + class: DispatchClass::Operational, + pays_fee: Pays::Yes, + }; + let priority = ChargeTransactionPayment::(tip) + .validate(&2, CALL, &op, len) + .unwrap() + .priority; + assert_eq!(priority, 5510); + }); + } + #[test] fn post_info_can_change_pays_fee() { ExtBuilder::default() diff --git a/frame/transaction-storage/Cargo.toml b/frame/transaction-storage/Cargo.toml index a4ebd5cfbc876..bcd3fd145f575 100644 --- a/frame/transaction-storage/Cargo.toml +++ b/frame/transaction-storage/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies 
"] edition = "2018" license = "Unlicense" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Storage chain pallet" readme = "README.md" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] serde = { version = "1.0.126", optional = true } -hex-literal = { version = "0.3.1", optional = true } +hex-literal = { version = "0.3.3", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } scale-info = { version = "1.0", default-features = false, features = ["derive"] } frame-support = { version = "4.0.0-dev", default-features = false, path = "../support" } diff --git a/frame/transaction-storage/src/benchmarking.rs b/frame/transaction-storage/src/benchmarking.rs index d5da6a42b46f0..6ca9b247f0228 100644 --- a/frame/transaction-storage/src/benchmarking.rs +++ b/frame/transaction-storage/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{benchmarks, whitelisted_caller}; use frame_support::traits::{Currency, OnFinalize, OnInitialize}; use frame_system::{EventRecord, Pallet as System, RawOrigin}; use sp_runtime::traits::{Bounded, One, Zero}; @@ -143,6 +143,6 @@ benchmarks! 
{ verify { assert_last_event::(Event::ProofChecked.into()); } -} -impl_benchmark_test_suite!(TransactionStorage, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(TransactionStorage, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/transaction-storage/src/lib.rs b/frame/transaction-storage/src/lib.rs index 2fe3c04e0229f..bc31199d90391 100644 --- a/frame/transaction-storage/src/lib.rs +++ b/frame/transaction-storage/src/lib.rs @@ -37,7 +37,7 @@ use sp_runtime::traits::{BlakeTwo256, Hash, One, Saturating, Zero}; use sp_std::{prelude::*, result}; use sp_transaction_storage_proof::{ encode_index, random_chunk, InherentError, TransactionStorageProof, CHUNK_SIZE, - DEFAULT_STORAGE_PERIOD, INHERENT_IDENTIFIER, + INHERENT_IDENTIFIER, }; /// A type alias for the balance type from this pallet's point of view. @@ -380,7 +380,7 @@ pub mod pallet { Self { byte_fee: 10u32.into(), entry_fee: 1000u32.into(), - storage_period: DEFAULT_STORAGE_PERIOD.into(), + storage_period: sp_transaction_storage_proof::DEFAULT_STORAGE_PERIOD.into(), max_block_transactions: DEFAULT_MAX_BLOCK_TRANSACTIONS, max_transaction_size: DEFAULT_MAX_TRANSACTION_SIZE, } diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index b2991f3febcad..bcbe41985c655 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet to manage treasury" readme = "README.md" diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index 2fe0bad704f2b..8570b0efdb945 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::{Pallet as Treasury, *}; -use frame_benchmarking::{account, 
benchmarks_instance_pallet, impl_benchmark_test_suite}; +use frame_benchmarking::{account, benchmarks_instance_pallet}; use frame_support::{ensure, traits::OnInitialize}; use frame_system::RawOrigin; @@ -94,6 +94,6 @@ benchmarks_instance_pallet! { }: { Treasury::::on_initialize(T::BlockNumber::zero()); } -} -impl_benchmark_test_suite!(Treasury, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Treasury, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/try-runtime/Cargo.toml b/frame/try-runtime/Cargo.toml index 0ff534767607d..611a844278269 100644 --- a/frame/try-runtime/Cargo.toml +++ b/frame/try-runtime/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for democracy" readme = "README.md" diff --git a/frame/uniques/Cargo.toml b/frame/uniques/Cargo.toml index 4f664ecc2b6a9..f240bb98afab7 100644 --- a/frame/uniques/Cargo.toml +++ b/frame/uniques/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME NFT asset management pallet" readme = "README.md" diff --git a/frame/uniques/src/benchmarking.rs b/frame/uniques/src/benchmarking.rs index 5c777dc961e9e..0e161bf7bfe85 100644 --- a/frame/uniques/src/benchmarking.rs +++ b/frame/uniques/src/benchmarking.rs @@ -21,8 +21,7 @@ use super::*; use frame_benchmarking::{ - account, benchmarks_instance_pallet, impl_benchmark_test_suite, whitelist_account, - whitelisted_caller, + account, benchmarks_instance_pallet, whitelist_account, whitelisted_caller, }; use frame_support::{ dispatch::UnfilteredDispatchable, @@ -379,6 +378,6 @@ 
benchmarks_instance_pallet! { verify { assert_last_event::(Event::ApprovalCancelled(class, instance, caller, delegate).into()); } -} -impl_benchmark_test_suite!(Uniques, crate::mock::new_test_ext(), crate::mock::Test); + impl_benchmark_test_suite!(Uniques, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/frame/uniques/src/functions.rs b/frame/uniques/src/functions.rs index a878a4910f769..68acf7f1879fb 100644 --- a/frame/uniques/src/functions.rs +++ b/frame/uniques/src/functions.rs @@ -80,6 +80,41 @@ impl, I: 'static> Pallet { Ok(()) } + pub(super) fn do_destroy_class( + class: T::ClassId, + witness: DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Class::::try_mutate_exists(class, |maybe_details| { + let class_details = maybe_details.take().ok_or(Error::::Unknown)?; + if let Some(check_owner) = maybe_check_owner { + ensure!(class_details.owner == check_owner, Error::::NoPermission); + } + ensure!(class_details.instances == witness.instances, Error::::BadWitness); + ensure!( + class_details.instance_metadatas == witness.instance_metadatas, + Error::::BadWitness + ); + ensure!(class_details.attributes == witness.attributes, Error::::BadWitness); + + for (instance, details) in Asset::::drain_prefix(&class) { + Account::::remove((&details.owner, &class, &instance)); + } + InstanceMetadataOf::::remove_prefix(&class, None); + ClassMetadataOf::::remove(&class); + Attribute::::remove_prefix((&class,), None); + T::Currency::unreserve(&class_details.owner, class_details.total_deposit); + + Self::deposit_event(Event::Destroyed(class)); + + Ok(DestroyWitness { + instances: class_details.instances, + instance_metadatas: class_details.instance_metadatas, + attributes: class_details.attributes, + }) + }) + } + pub(super) fn do_mint( class: T::ClassId, instance: T::InstanceId, diff --git a/frame/uniques/src/impl_nonfungibles.rs b/frame/uniques/src/impl_nonfungibles.rs index c5d5c6089f865..e68d2d4deecda 100644 --- 
a/frame/uniques/src/impl_nonfungibles.rs +++ b/frame/uniques/src/impl_nonfungibles.rs @@ -19,13 +19,10 @@ use super::*; use frame_support::{ - traits::{ - tokens::nonfungibles::{Create, Inspect, InspectEnumerable, Mutate, Transfer}, - Get, - }, + traits::{tokens::nonfungibles::*, Get}, BoundedSlice, }; -use sp_runtime::DispatchResult; +use sp_runtime::{DispatchError, DispatchResult}; use sp_std::convert::TryFrom; impl, I: 'static> Inspect<::AccountId> for Pallet { @@ -106,6 +103,22 @@ impl, I: 'static> Create<::AccountId> for Pallet } } +impl, I: 'static> Destroy<::AccountId> for Pallet { + type DestroyWitness = DestroyWitness; + + fn get_destroy_witness(class: &Self::ClassId) -> Option { + Class::::get(class).map(|a| a.destroy_witness()) + } + + fn destroy( + class: Self::ClassId, + witness: Self::DestroyWitness, + maybe_check_owner: Option, + ) -> Result { + Self::do_destroy_class(class, witness, maybe_check_owner) + } +} + impl, I: 'static> Mutate<::AccountId> for Pallet { fn mint_into( class: &Self::ClassId, diff --git a/frame/uniques/src/lib.rs b/frame/uniques/src/lib.rs index 8c716694051b5..1bf220e4a7876 100644 --- a/frame/uniques/src/lib.rs +++ b/frame/uniques/src/lib.rs @@ -381,37 +381,19 @@ pub mod pallet { origin: OriginFor, #[pallet::compact] class: T::ClassId, witness: DestroyWitness, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { let maybe_check_owner = match T::ForceOrigin::try_origin(origin) { Ok(_) => None, Err(origin) => Some(ensure_signed(origin)?), }; - Class::::try_mutate_exists(class, |maybe_details| { - let class_details = maybe_details.take().ok_or(Error::::Unknown)?; - if let Some(check_owner) = maybe_check_owner { - ensure!(class_details.owner == check_owner, Error::::NoPermission); - } - ensure!(class_details.instances == witness.instances, Error::::BadWitness); - ensure!( - class_details.instance_metadatas == witness.instance_metadatas, - Error::::BadWitness - ); - ensure!(class_details.attributes == witness.attributes, 
Error::::BadWitness); - - for (instance, details) in Asset::::drain_prefix(&class) { - Account::::remove((&details.owner, &class, &instance)); - } - InstanceMetadataOf::::remove_prefix(&class, None); - ClassMetadataOf::::remove(&class); - Attribute::::remove_prefix((&class,), None); - T::Currency::unreserve(&class_details.owner, class_details.total_deposit); - - Self::deposit_event(Event::Destroyed(class)); - - // NOTE: could use postinfo to reflect the actual number of - // accounts/sufficient/approvals - Ok(()) - }) + let details = Self::do_destroy_class(class, witness, maybe_check_owner)?; + + Ok(Some(T::WeightInfo::destroy( + details.instances, + details.instance_metadatas, + details.attributes, + )) + .into()) } /// Mint an asset instance of a particular class. diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index b5b8eab9cdbf3..0a0a9eafd845b 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME utilities pallet" readme = "README.md" diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 210a6156499cf..70cc61f87b9c9 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -20,7 +20,7 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_system::RawOrigin; const SEED: u32 = 0; @@ -63,6 +63,6 @@ benchmarks! 
{ verify { assert_last_event::(Event::BatchCompleted.into()) } -} -impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); + impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); +} diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index 806e0e6036862..3179607b3f6d7 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for manage vesting" readme = "README.md" diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 5cdc14c8fdaca..b52ddac3e8857 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -19,7 +19,7 @@ #![cfg(feature = "runtime-benchmarks")] -use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller}; use frame_support::assert_ok; use frame_system::{Pallet as System, RawOrigin}; use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul}; @@ -374,10 +374,10 @@ benchmarks! 
{ T::Currency::transfer(&caller, &test_dest, expected_balance, ExistenceRequirement::AllowDeath) ); } -} -impl_benchmark_test_suite!( - Vesting, - crate::mock::ExtBuilder::default().existential_deposit(256).build(), - crate::mock::Test, -); + impl_benchmark_test_suite!( + Vesting, + crate::mock::ExtBuilder::default().existential_deposit(256).build(), + crate::mock::Test, + ); +} diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index 7e751232acb50..c57c3730fc7b6 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate runtime api primitives" readme = "README.md" diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index d5909967ac5a4..d9dd0bf9020c7 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Macros for declaring and implementing runtime apis." 
documentation = "https://docs.rs/sp-api-proc-macro" @@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.3" -syn = { version = "1.0.58", features = ["full", "fold", "extra-traits", "visit"] } +quote = "1.0.10" +syn = { version = "1.0.80", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.29" blake2-rfc = { version = "0.2.18", default-features = false } proc-macro-crate = "1.0.0" diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index b78c9abb80dc6..faee5ebdc77db 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" publish = false -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] diff --git a/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr index b08f056b57d1c..bf201e8b55a78 100644 --- a/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/empty_impl_runtime_apis_call.stderr @@ -4,4 +4,4 @@ error: No api implementation given! 17 | sp_api::impl_runtime_apis! 
{} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr index 9dd84c24b6781..2fb06c3565ea2 100644 --- a/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr +++ b/primitives/api/test/tests/ui/impl_incorrect_method_signature.stderr @@ -38,7 +38,7 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types --> $DIR/impl_incorrect_method_signature.rs:17:1 @@ -52,7 +52,7 @@ error[E0308]: mismatched types 33 | | } | |_^ expected `u64`, found struct `std::string::String` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types --> $DIR/impl_incorrect_method_signature.rs:19:11 diff --git a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr index 
47cd9e01d910f..befe67c1d0b4a 100644 --- a/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr +++ b/primitives/api/test/tests/ui/mock_advanced_block_id_by_value.stderr @@ -10,4 +10,4 @@ error: `BlockId` needs to be taken by reference and not by value! 19 | | } | |_^ | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr index 7385fe4745989..1b1d2553940a5 100644 --- a/primitives/api/test/tests/ui/mock_only_self_reference.stderr +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -36,7 +36,7 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for trait --> $DIR/mock_only_self_reference.rs:12:1 @@ -64,4 +64,4 @@ error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for t | = note: expected fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&MockApi, &BlockId, Extrinsic>>, ExecutionContext, Option<()>, Vec<_>) -> Result<_, _>` - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error 
originates in the macro `sp_api::mock_impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr index a0a16c4a493db..063cbff60f81e 100644 --- a/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr +++ b/primitives/api/test/tests/ui/type_reference_in_impl_runtime_apis_call.stderr @@ -38,7 +38,7 @@ error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for tr | = note: expected fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option, Vec<_>) -> Result<_, _>` found fn pointer `fn(&RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall>, &BlockId<__SR_API_BLOCK__>, ExecutionContext, std::option::Option<&u64>, Vec<_>) -> Result<_, _>` - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types --> $DIR/type_reference_in_impl_runtime_apis_call.rs:17:1 @@ -52,7 +52,7 @@ error[E0308]: mismatched types 35 | | } | |_^ expected `u64`, found `&u64` | - = note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the macro `sp_api::impl_runtime_apis` (in Nightly builds, run with -Z macro-backtrace for more info) error[E0308]: mismatched types --> $DIR/type_reference_in_impl_runtime_apis_call.rs:19:11 diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index 6849dc25f8561..88411d86392af 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] 
edition = "2018" description = "Provides facilities for generating application specific crypto wrapper types." license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sp-application-crypto" readme = "README.md" diff --git a/primitives/application-crypto/test/Cargo.toml b/primitives/application-crypto/test/Cargo.toml index 468bfee3cc010..d10f011c4c603 100644 --- a/primitives/application-crypto/test/Cargo.toml +++ b/primitives/application-crypto/test/Cargo.toml @@ -6,7 +6,7 @@ edition = "2018" description = "Integration tests for application-crypto" license = "Apache-2.0" publish = false -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" [package.metadata.docs.rs] diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index abdbd4e60d041..92b16b895e3b5 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Minimal fixed point arithmetic primitives and types for runtime." documentation = "https://docs.rs/sp-arithmetic" diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index d10eccfc7c74a..5f2d5801ff995 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -4,7 +4,7 @@ version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Fuzzer for fixed point arithmetic primitives." 
documentation = "https://docs.rs/sp-arithmetic-fuzzer" diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index 6638e478b4cd7..d4c75dda352ca 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Authority discovery primitives" edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index 15e4dc57ff5ab..cee82ca77c375 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Authorship primitives" edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/primitives/beefy/Cargo.toml b/primitives/beefy/Cargo.toml new file mode 100644 index 0000000000000..633ac0e8fbcd1 --- /dev/null +++ b/primitives/beefy/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "beefy-primitives" +version = "4.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" + +[dependencies] +codec = { version = "2.2.0", package = "parity-scale-codec", default-features = false, features = ["derive"] } +scale-info = { version = "1.0", default-features = false, features = ["derive"] } + +sp-api = { version = "4.0.0-dev", path = "../api", default-features = false } +sp-application-crypto = { version = "4.0.0-dev", path = "../application-crypto", default-features = false } +sp-core = { version = "4.0.0-dev", path = "../core", default-features = false } +sp-runtime = { version = "4.0.0-dev", path = "../runtime", default-features = false } +sp-std = { version = "4.0.0-dev", path = 
"../std", default-features = false } + +[dev-dependencies] +hex-literal = "0.3" + +sp-keystore = { version = "0.10.0-dev", path = "../keystore" } + +[features] +default = ["std"] +std = [ + "codec/std", + "scale-info/std", + "sp-api/std", + "sp-application-crypto/std", + "sp-core/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/primitives/beefy/src/commitment.rs b/primitives/beefy/src/commitment.rs new file mode 100644 index 0000000000000..7aab93bbcb973 --- /dev/null +++ b/primitives/beefy/src/commitment.rs @@ -0,0 +1,264 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use sp_std::{cmp, prelude::*}; + +use crate::{crypto::Signature, ValidatorSetId}; + +/// A commitment signed by GRANDPA validators as part of BEEFY protocol. +/// +/// The commitment contains a [payload] extracted from the finalized block at height [block_number]. +/// GRANDPA validators collect signatures on commitments and a stream of such signed commitments +/// (see [SignedCommitment]) forms the BEEFY protocol. +#[derive(Clone, Debug, PartialEq, Eq, codec::Encode, codec::Decode)] +pub struct Commitment { + /// The payload being signed. + /// + /// This should be some form of cumulative representation of the chain (think MMR root hash). 
+ /// The payload should also contain some details that allow the light client to verify next + /// validator set. The protocol does not enforce any particular format of this data, + /// nor how often it should be present in commitments, however the light client has to be + /// provided with full validator set whenever it performs the transition (i.e. importing first + /// block with [validator_set_id] incremented). + pub payload: TPayload, + + /// Finalized block number this commitment is for. + /// + /// GRANDPA validators agree on a block they create a commitment for and start collecting + /// signatures. This process is called a round. + /// There might be multiple rounds in progress (depending on the block choice rule), however + /// since the payload is supposed to be cumulative, it is not required to import all + /// commitments. + /// BEEFY light client is expected to import at least one commitment per epoch, + /// but is free to import as many as it requires. + pub block_number: TBlockNumber, + + /// BEEFY validator set supposed to sign this commitment. + /// + /// Validator set is changing once per epoch. The Light Client must be provided by details + /// about the validator set whenever it's importing first commitment with a new + /// `validator_set_id`. Validator set data MUST be verifiable, for instance using [payload] + /// information. + pub validator_set_id: ValidatorSetId, +} + +impl cmp::PartialOrd for Commitment +where + TBlockNumber: cmp::Ord, + TPayload: cmp::Eq, +{ + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl cmp::Ord for Commitment +where + TBlockNumber: cmp::Ord, + TPayload: cmp::Eq, +{ + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.validator_set_id + .cmp(&other.validator_set_id) + .then_with(|| self.block_number.cmp(&other.block_number)) + } +} + +/// A commitment with matching GRANDPA validators' signatures. 
+#[derive(Clone, Debug, PartialEq, Eq, codec::Encode, codec::Decode)] +pub struct SignedCommitment { + /// The commitment signatures are collected for. + pub commitment: Commitment, + /// GRANDPA validators' signatures for the commitment. + /// + /// The length of this `Vec` must match number of validators in the current set (see + /// [Commitment::validator_set_id]). + pub signatures: Vec>, +} + +impl SignedCommitment { + /// Return the number of collected signatures. + pub fn no_of_signatures(&self) -> usize { + self.signatures.iter().filter(|x| x.is_some()).count() + } +} + +/// A [SignedCommitment] with a version number. This variant will be appended +/// to the block justifications for the block for which the signed commitment +/// has been generated. +#[derive(Clone, Debug, PartialEq, codec::Encode, codec::Decode)] +pub enum VersionedCommitment { + #[codec(index = 1)] + /// Current active version + V1(SignedCommitment), +} + +#[cfg(test)] +mod tests { + + use sp_core::{keccak_256, Pair}; + use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; + + use super::*; + use codec::Decode; + + use crate::{crypto, KEY_TYPE}; + + type TestCommitment = Commitment; + type TestSignedCommitment = SignedCommitment; + type TestVersionedCommitment = VersionedCommitment; + + // The mock signatures are equivalent to the ones produced by the BEEFY keystore + fn mock_signatures() -> (crypto::Signature, crypto::Signature) { + let store: SyncCryptoStorePtr = KeyStore::new().into(); + + let alice = sp_core::ecdsa::Pair::from_string("//Alice", None).unwrap(); + let _ = + SyncCryptoStore::insert_unknown(&*store, KEY_TYPE, "//Alice", alice.public().as_ref()) + .unwrap(); + + let msg = keccak_256(b"This is the first message"); + let sig1 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, &alice.public(), &msg) + .unwrap() + .unwrap(); + + let msg = keccak_256(b"This is the second message"); + let sig2 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, 
&alice.public(), &msg) + .unwrap() + .unwrap(); + + (sig1.into(), sig2.into()) + } + + #[test] + fn commitment_encode_decode() { + // given + let commitment: TestCommitment = + Commitment { payload: "Hello World!".into(), block_number: 5, validator_set_id: 0 }; + + // when + let encoded = codec::Encode::encode(&commitment); + let decoded = TestCommitment::decode(&mut &*encoded); + + // then + assert_eq!(decoded, Ok(commitment)); + assert_eq!( + encoded, + hex_literal::hex!( + "3048656c6c6f20576f726c6421050000000000000000000000000000000000000000000000" + ) + ); + } + + #[test] + fn signed_commitment_encode_decode() { + // given + let commitment: TestCommitment = + Commitment { payload: "Hello World!".into(), block_number: 5, validator_set_id: 0 }; + + let sigs = mock_signatures(); + + let signed = SignedCommitment { + commitment, + signatures: vec![None, None, Some(sigs.0), Some(sigs.1)], + }; + + // when + let encoded = codec::Encode::encode(&signed); + let decoded = TestSignedCommitment::decode(&mut &*encoded); + + // then + assert_eq!(decoded, Ok(signed)); + assert_eq!( + encoded, + hex_literal::hex!( + "3048656c6c6f20576f726c64210500000000000000000000000000000000000000000000001000 + 0001558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e99a830e314d + 10dd3cd68ce3dc0c33c86e99bcb7816f9ba01012d6e1f8105c337a86cdd9aaacdc496577f3db8c55ef9e6fd48f2c5c05a + 2274707491635d8ba3df64f324575b7b2a34487bca2324b6a0046395a71681be3d0c2a00" + ) + ); + } + + #[test] + fn signed_commitment_count_signatures() { + // given + let commitment: TestCommitment = + Commitment { payload: "Hello World!".into(), block_number: 5, validator_set_id: 0 }; + + let sigs = mock_signatures(); + + let mut signed = SignedCommitment { + commitment, + signatures: vec![None, None, Some(sigs.0), Some(sigs.1)], + }; + assert_eq!(signed.no_of_signatures(), 2); + + // when + signed.signatures[2] = None; + + // then + assert_eq!(signed.no_of_signatures(), 1); + } + + #[test] + fn 
commitment_ordering() { + fn commitment( + block_number: u128, + validator_set_id: crate::ValidatorSetId, + ) -> TestCommitment { + Commitment { payload: "Hello World!".into(), block_number, validator_set_id } + } + + // given + let a = commitment(1, 0); + let b = commitment(2, 1); + let c = commitment(10, 0); + let d = commitment(10, 1); + + // then + assert!(a < b); + assert!(a < c); + assert!(c < b); + assert!(c < d); + assert!(b < d); + } + + #[test] + fn versioned_commitment_encode_decode() { + let commitment: TestCommitment = + Commitment { payload: "Hello World!".into(), block_number: 5, validator_set_id: 0 }; + + let sigs = mock_signatures(); + + let signed = SignedCommitment { + commitment, + signatures: vec![None, None, Some(sigs.0), Some(sigs.1)], + }; + + let versioned = TestVersionedCommitment::V1(signed.clone()); + + let encoded = codec::Encode::encode(&versioned); + + assert_eq!(1, encoded[0]); + assert_eq!(encoded[1..], codec::Encode::encode(&signed)); + + let decoded = TestVersionedCommitment::decode(&mut &*encoded); + + assert_eq!(decoded, Ok(versioned)); + } +} diff --git a/primitives/beefy/src/lib.rs b/primitives/beefy/src/lib.rs new file mode 100644 index 0000000000000..790b915ab98db --- /dev/null +++ b/primitives/beefy/src/lib.rs @@ -0,0 +1,137 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#![cfg_attr(not(feature = "std"), no_std)] +#![warn(missing_docs)] + +//! Primitives for BEEFY protocol. +//! +//! The crate contains shared data types used by BEEFY protocol and documentation (in a form of +//! code) for building a BEEFY light client. +//! +//! BEEFY is a gadget that runs alongside another finality gadget (for instance GRANDPA). +//! For simplicity (and the initially intended use case) the documentation says GRANDPA in places +//! where a more abstract "Finality Gadget" term could be used, but there is no reason why BEEFY +//! wouldn't run with some other finality scheme. +//! BEEFY validator set is supposed to be tracking the Finality Gadget validator set, but note that +//! it will use a different set of keys. For Polkadot use case we plan to use `secp256k1` for BEEFY, +//! while GRANDPA uses `ed25519`. + +mod commitment; +pub mod mmr; +pub mod witness; + +pub use commitment::{Commitment, SignedCommitment, VersionedCommitment}; + +use codec::{Codec, Decode, Encode}; +use scale_info::TypeInfo; +use sp_core::H256; +use sp_std::prelude::*; + +/// Key type for BEEFY module. +pub const KEY_TYPE: sp_application_crypto::KeyTypeId = sp_application_crypto::KeyTypeId(*b"beef"); + +/// BEEFY cryptographic types +/// +/// This module basically introduces three crypto types: +/// - `crypto::Pair` +/// - `crypto::Public` +/// - `crypto::Signature` +/// +/// Your code should use the above types as concrete types for all crypto related +/// functionality. +/// +/// The current underlying crypto scheme used is ECDSA. This can be changed, +/// without affecting code restricted against the above listed crypto types. +pub mod crypto { + use sp_application_crypto::{app_crypto, ecdsa}; + app_crypto!(ecdsa, crate::KEY_TYPE); + + /// Identity of a BEEFY authority using ECDSA as its crypto. + pub type AuthorityId = Public; + + /// Signature for a BEEFY authority using ECDSA as its crypto. 
+ pub type AuthoritySignature = Signature; +} + +/// The `ConsensusEngineId` of BEEFY. +pub const BEEFY_ENGINE_ID: sp_runtime::ConsensusEngineId = *b"BEEF"; + +/// Authority set id starts with zero at genesis +pub const GENESIS_AUTHORITY_SET_ID: u64 = 0; + +/// A typedef for validator set id. +pub type ValidatorSetId = u64; + +/// A set of BEEFY authorities, a.k.a. validators. +#[derive(Decode, Encode, Debug, PartialEq, Clone, TypeInfo)] +pub struct ValidatorSet { + /// Public keys of the validator set elements + pub validators: Vec, + /// Identifier of the validator set + pub id: ValidatorSetId, +} + +impl ValidatorSet { + /// Return an empty validator set with id of 0. + pub fn empty() -> Self { + Self { validators: Default::default(), id: Default::default() } + } +} + +/// The index of an authority. +pub type AuthorityIndex = u32; + +/// The type used to represent an MMR root hash. +pub type MmrRootHash = H256; + +/// A consensus log item for BEEFY. +#[derive(Decode, Encode, TypeInfo)] +pub enum ConsensusLog { + /// The authorities have changed. + #[codec(index = 1)] + AuthoritiesChange(ValidatorSet), + /// Disable the authority with given index. + #[codec(index = 2)] + OnDisabled(AuthorityIndex), + /// MMR root hash. + #[codec(index = 3)] + MmrRoot(MmrRootHash), +} + +/// BEEFY vote message. +/// +/// A vote message is a direct vote created by a BEEFY node on every voting round +/// and is gossiped to its peers. +#[derive(Debug, Decode, Encode, TypeInfo)] +pub struct VoteMessage { + /// Commit to information extracted from a finalized block + pub commitment: Commitment, + /// Node authority id + pub id: Id, + /// Node signature + pub signature: Signature, +} + +sp_api::decl_runtime_apis! { + /// API necessary for BEEFY voters. 
+ pub trait BeefyApi + { + /// Return the current active BEEFY validator set + fn validator_set() -> ValidatorSet; + } +} diff --git a/primitives/beefy/src/mmr.rs b/primitives/beefy/src/mmr.rs new file mode 100644 index 0000000000000..e428c0ea01215 --- /dev/null +++ b/primitives/beefy/src/mmr.rs @@ -0,0 +1,132 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! BEEFY + MMR utilties. +//! +//! While BEEFY can be used completely indepentently as an additional consensus gadget, +//! it is designed around a main use case of making bridging standalone networks together. +//! For that use case it's common to use some aggregated data structure (like MMR) to be +//! used in conjunction with BEEFY, to be able to efficiently prove any past blockchain data. +//! +//! This module contains primitives used by Polkadot implementation of the BEEFY+MMR bridge, +//! but we imagine they will be useful for other chains that either want to bridge with Polkadot +//! or are completely standalone, but heavily inspired by Polkadot. + +use codec::{Decode, Encode}; +use scale_info::TypeInfo; + +/// A standard leaf that gets added every block to the MMR constructed by Substrate's `pallet_mmr`. +#[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] +pub struct MmrLeaf { + /// Version of the leaf format. 
+ /// + /// Can be used to enable future format migrations and compatibility. + /// See [`MmrLeafVersion`] documentation for details. + pub version: MmrLeafVersion, + /// Current block parent number and hash. + pub parent_number_and_hash: (BlockNumber, Hash), + /// A merkle root of the next BEEFY authority set. + pub beefy_next_authority_set: BeefyNextAuthoritySet, + /// A merkle root of all registered parachain heads. + pub parachain_heads: MerkleRoot, +} + +/// A MMR leaf versioning scheme. +/// +/// Version is a single byte that constist of two components: +/// - `major` - 3 bits +/// - `minor` - 5 bits +/// +/// Any change in encoding that adds new items to the structure is considered non-breaking, hence +/// only requires an update of `minor` version. Any backward incompatible change (i.e. decoding to a +/// previous leaf format fails) should be indicated with `major` version bump. +/// +/// Given that adding new struct elements in SCALE is backward compatible (i.e. old format can be +/// still decoded, the new fields will simply be ignored). We expect the major version to be bumped +/// very rarely (hopefuly never). +#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode)] +pub struct MmrLeafVersion(u8); +impl MmrLeafVersion { + /// Create new version object from `major` and `minor` components. + /// + /// Panics if any of the component occupies more than 4 bits. + pub fn new(major: u8, minor: u8) -> Self { + if major > 0b111 || minor > 0b11111 { + panic!("Version components are too big."); + } + let version = (major << 5) + minor; + Self(version) + } + + /// Split the version into `major` and `minor` sub-components. + pub fn split(&self) -> (u8, u8) { + let major = self.0 >> 5; + let minor = self.0 & 0b11111; + (major, minor) + } +} + +/// Details of the next BEEFY authority set. +#[derive(Debug, Default, PartialEq, Eq, Clone, Encode, Decode, TypeInfo)] +pub struct BeefyNextAuthoritySet { + /// Id of the next set. 
+ /// + /// Id is required to correlate BEEFY signed commitments with the validator set. + /// Light Client can easily verify that the commitment witness it is getting is + /// produced by the latest validator set. + pub id: crate::ValidatorSetId, + /// Number of validators in the set. + /// + /// Some BEEFY Light Clients may use an interactive protocol to verify only subset + /// of signatures. We put set length here, so that these clients can verify the minimal + /// number of required signatures. + pub len: u32, + /// Merkle Root Hash build from BEEFY AuthorityIds. + /// + /// This is used by Light Clients to confirm that the commitments are signed by the correct + /// validator set. Light Clients using interactive protocol, might verify only subset of + /// signatures, hence don't require the full list here (will receive inclusion proofs). + pub root: MerkleRoot, +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn should_construct_version_correctly() { + let tests = vec![(0, 0, 0b00000000), (7, 2, 0b11100010), (7, 31, 0b11111111)]; + + for (major, minor, version) in tests { + let v = MmrLeafVersion::new(major, minor); + assert_eq!(v.encode(), vec![version], "Encoding does not match."); + assert_eq!(v.split(), (major, minor)); + } + } + + #[test] + #[should_panic] + fn should_panic_if_major_too_large() { + MmrLeafVersion::new(8, 0); + } + + #[test] + #[should_panic] + fn should_panic_if_minor_too_large() { + MmrLeafVersion::new(0, 32); + } +} diff --git a/primitives/beefy/src/witness.rs b/primitives/beefy/src/witness.rs new file mode 100644 index 0000000000000..c28a464e72df5 --- /dev/null +++ b/primitives/beefy/src/witness.rs @@ -0,0 +1,162 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Primitives for light, 2-phase interactive verification protocol. +//! +//! Instead of submitting full list of signatures, it's possible to submit first a witness +//! form of [SignedCommitment]. +//! This can later be verified by the client requesting only some (out of all) signatures for +//! verification. This allows lowering the data and computation cost of verifying the +//! signed commitment. + +use sp_std::prelude::*; + +use crate::{ + commitment::{Commitment, SignedCommitment}, + crypto::Signature, +}; + +/// A light form of [SignedCommitment]. +/// +/// This is a light ("witness") form of the signed commitment. Instead of containing full list of +/// signatures, which might be heavy and expensive to verify, it only contains a bit vector of +/// validators which signed the original [SignedCommitment] and a merkle root of all signatures. +/// +/// This can be used by light clients for 2-phase interactive verification (for instance for +/// Ethereum Mainnet), in a commit-reveal like scheme, where first we submit only the signed +/// commitment witness and later on, the client picks only some signatures to verify at random. +#[derive(Debug, PartialEq, Eq, codec::Encode, codec::Decode)] +pub struct SignedCommitmentWitness { + /// The full content of the commitment. + pub commitment: Commitment, + + /// The bit vector of validators who signed the commitment. + pub signed_by: Vec, // TODO [ToDr] Consider replacing with bitvec crate + + /// A merkle root of signatures in the original signed commitment. 
+ pub signatures_merkle_root: TMerkleRoot, +} + +impl + SignedCommitmentWitness +{ + /// Convert [SignedCommitment] into [SignedCommitmentWitness]. + /// + /// This takes a [SignedCommitment], which contains full signatures + /// and converts it into a witness form, which does not contain full signatures, + /// only a bit vector indicating which validators have signed the original [SignedCommitment] + /// and a merkle root of all signatures. + /// + /// Returns the full list of signatures along with the witness. + pub fn from_signed( + signed: SignedCommitment, + merkelize: TMerkelize, + ) -> (Self, Vec>) + where + TMerkelize: FnOnce(&[Option]) -> TMerkleRoot, + { + let SignedCommitment { commitment, signatures } = signed; + let signed_by = signatures.iter().map(|s| s.is_some()).collect(); + let signatures_merkle_root = merkelize(&signatures); + + (Self { commitment, signed_by, signatures_merkle_root }, signatures) + } +} + +#[cfg(test)] +mod tests { + + use sp_core::{keccak_256, Pair}; + use sp_keystore::{testing::KeyStore, SyncCryptoStore, SyncCryptoStorePtr}; + + use super::*; + use codec::Decode; + + use crate::{crypto, KEY_TYPE}; + + type TestCommitment = Commitment; + type TestSignedCommitment = SignedCommitment; + type TestSignedCommitmentWitness = + SignedCommitmentWitness>>; + + // The mock signatures are equivalent to the ones produced by the BEEFY keystore + fn mock_signatures() -> (crypto::Signature, crypto::Signature) { + let store: SyncCryptoStorePtr = KeyStore::new().into(); + + let alice = sp_core::ecdsa::Pair::from_string("//Alice", None).unwrap(); + let _ = + SyncCryptoStore::insert_unknown(&*store, KEY_TYPE, "//Alice", alice.public().as_ref()) + .unwrap(); + + let msg = keccak_256(b"This is the first message"); + let sig1 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, KEY_TYPE, &alice.public(), &msg) + .unwrap() + .unwrap(); + + let msg = keccak_256(b"This is the second message"); + let sig2 = SyncCryptoStore::ecdsa_sign_prehashed(&*store, 
KEY_TYPE, &alice.public(), &msg) + .unwrap() + .unwrap(); + + (sig1.into(), sig2.into()) + } + + fn signed_commitment() -> TestSignedCommitment { + let commitment: TestCommitment = + Commitment { payload: "Hello World!".into(), block_number: 5, validator_set_id: 0 }; + + let sigs = mock_signatures(); + + SignedCommitment { commitment, signatures: vec![None, None, Some(sigs.0), Some(sigs.1)] } + } + + #[test] + fn should_convert_signed_commitment_to_witness() { + // given + let signed = signed_commitment(); + + // when + let (witness, signatures) = + TestSignedCommitmentWitness::from_signed(signed, |sigs| sigs.to_vec()); + + // then + assert_eq!(witness.signatures_merkle_root, signatures); + } + + #[test] + fn should_encode_and_decode_witness() { + // given + let signed = signed_commitment(); + let (witness, _) = TestSignedCommitmentWitness::from_signed(signed, |sigs| sigs.to_vec()); + + // when + let encoded = codec::Encode::encode(&witness); + let decoded = TestSignedCommitmentWitness::decode(&mut &*encoded); + + // then + assert_eq!(decoded, Ok(witness)); + assert_eq!( + encoded, + hex_literal::hex!( + "3048656c6c6f20576f726c64210500000000000000000000000000000000000000000000001000 + 00010110000001558455ad81279df0795cc985580e4fb75d72d948d1107b2ac80a09abed4da8480c746cc321f2319a5e9 + 9a830e314d10dd3cd68ce3dc0c33c86e99bcb7816f9ba01012d6e1f8105c337a86cdd9aaacdc496577f3db8c55ef9e6fd + 48f2c5c05a2274707491635d8ba3df64f324575b7b2a34487bca2324b6a0046395a71681be3d0c2a00" + ) + ); + } +} diff --git a/primitives/block-builder/Cargo.toml b/primitives/block-builder/Cargo.toml index d7fa0f2ef85cf..8499bdf8e1c70 100644 --- a/primitives/block-builder/Cargo.toml +++ b/primitives/block-builder/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "The block builder runtime 
api." readme = "README.md" diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 66d9152c230df..93daef5fa1a27 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate blockchain traits and primitives." documentation = "https://docs.rs/sp-blockchain" diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index bb34a0449b5f7..fc70ce845dc98 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -281,6 +281,8 @@ pub struct Info { pub finalized_state: Option<(Block::Hash, <::Header as HeaderT>::Number)>, /// Number of concurrent leave forks. pub number_leaves: usize, + /// Missing blocks after warp sync. (start, end). + pub block_gap: Option<(NumberFor, NumberFor)>, } /// Block status. 
diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index c228b88fd6570..ca73cf206de02 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 5f6bfec219739..1690b7c9a02d3 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Primitives for BABE consensus" edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index 470a028021ca1..1c908fe61fc0b 100644 --- a/primitives/consensus/babe/src/digests.rs +++ b/primitives/consensus/babe/src/digests.rs @@ -21,7 +21,7 @@ use super::{ AllowedSlots, AuthorityId, AuthorityIndex, AuthoritySignature, BabeAuthorityWeight, BabeEpochConfiguration, Slot, BABE_ENGINE_ID, }; -use codec::{Codec, Decode, Encode}; +use codec::{Codec, Decode, Encode, MaxEncodedLen}; use sp_runtime::{DigestItem, RuntimeDebug}; use sp_std::vec::Vec; @@ -134,7 +134,9 @@ pub struct NextEpochDescriptor { /// Information about the next epoch config, if changed. This is broadcast in the first /// block of the epoch, and applies using the same rules as `NextEpochDescriptor`. 
-#[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug, scale_info::TypeInfo)] +#[derive( + Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug, MaxEncodedLen, scale_info::TypeInfo, +)] pub enum NextConfigDescriptor { /// Version 1. #[codec(index = 1)] diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 4417670f4144b..560866cfb2ab5 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -28,7 +28,7 @@ pub use sp_consensus_vrf::schnorrkel::{ Randomness, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, }; -use codec::{Decode, Encode}; +use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; @@ -214,7 +214,7 @@ pub struct BabeGenesisConfiguration { } /// Types of allowed slots. -#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum AllowedSlots { /// Only allow primary slots. @@ -247,7 +247,7 @@ impl sp_consensus::SlotData for BabeGenesisConfiguration { } /// Configuration data used by the BABE consensus engine. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BabeEpochConfiguration { /// A constant value that is used in the threshold calculation formula. 
diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index ecfc1c1b31826..9a5488abba653 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Common utilities for building and using consensus engines in substrate." documentation = "https://docs.rs/sp-consensus/" diff --git a/primitives/consensus/common/src/select_chain.rs b/primitives/consensus/common/src/select_chain.rs index 5408fc86b7bd4..fd8b06ecf8abb 100644 --- a/primitives/consensus/common/src/select_chain.rs +++ b/primitives/consensus/common/src/select_chain.rs @@ -50,7 +50,7 @@ pub trait SelectChain: Sync + Send + Clone { &self, target_hash: ::Hash, _maybe_max_number: Option>, - ) -> Result::Hash>, Error> { - Ok(Some(target_hash)) + ) -> Result<::Hash, Error> { + Ok(target_hash) } } diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml index f93eeca2fb24e..406ed3dea46a5 100644 --- a/primitives/consensus/pow/Cargo.toml +++ b/primitives/consensus/pow/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/primitives/consensus/slots/Cargo.toml b/primitives/consensus/slots/Cargo.toml index 3ad204f973961..014ee9b93e6e8 100644 --- a/primitives/consensus/slots/Cargo.toml +++ b/primitives/consensus/slots/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Primitives for slots-based consensus" edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage 
= "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/primitives/consensus/vrf/Cargo.toml b/primitives/consensus/vrf/Cargo.toml index 124cbf423f068..c103e68eb66b2 100644 --- a/primitives/consensus/vrf/Cargo.toml +++ b/primitives/consensus/vrf/Cargo.toml @@ -6,7 +6,7 @@ description = "Primitives for VRF based consensus" edition = "2018" license = "Apache-2.0" repository = "https://github.com/paritytech/substrate/" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" readme = "README.md" [package.metadata.docs.rs] diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 73c3d454ed584..dd721d744f573 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Shareable Substrate types." 
documentation = "https://docs.rs/sp-core" @@ -30,17 +30,17 @@ impl-serde = { version = "0.3.0", optional = true } wasmi = { version = "0.9.0", optional = true } hash-db = { version = "0.15.2", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } -base58 = { version = "0.1.0", optional = true } +base58 = { version = "0.2.0", optional = true } rand = { version = "0.7.3", optional = true, features = ["small_rng"] } substrate-bip39 = { version = "0.4.2", optional = true } -tiny-bip39 = { version = "0.8", optional = true } -regex = { version = "1.4.2", optional = true } +tiny-bip39 = { version = "0.8.2", optional = true } +regex = { version = "1.5.4", optional = true } num-traits = { version = "0.2.8", default-features = false } -zeroize = { version = "1.4.1", default-features = false } -secrecy = { version = "0.7.0", default-features = false } +zeroize = { version = "1.4.2", default-features = false } +secrecy = { version = "0.8.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.11.1", optional = true } -sp-debug-derive = { version = "3.0.0", path = "../debug-derive" } +sp-debug-derive = { version = "3.0.0", default-features = false, path = "../debug-derive" } sp-externalities = { version = "0.10.0-dev", optional = true, path = "../externalities" } sp-storage = { version = "4.0.0-dev", default-features = false, path = "../storage" } parity-util-mem = { version = "0.10.0", default-features = false, features = [ @@ -61,17 +61,17 @@ schnorrkel = { version = "0.9.1", features = [ "preaudit_deprecated", "u64_backend", ], default-features = false, optional = true } -sha2 = { version = "0.9.2", default-features = false, optional = true } +sha2 = { version = "0.9.8", default-features = false, optional = true } hex = { version = "0.4", default-features = false, optional = true } -twox-hash = { version = "1.5.0", default-features = false, optional = 
true } +twox-hash = { version = "1.6.1", default-features = false, optional = true } libsecp256k1 = { version = "0.6", default-features = false, features = ["hmac", "static-context"], optional = true } merlin = { version = "2.0", default-features = false, optional = true } - +ss58-registry = "1.0.0" sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../runtime-interface" } [dev-dependencies] sp-serializer = { version = "3.0.0", path = "../serializer" } -hex-literal = "0.3.1" +hex-literal = "0.3.3" rand = "0.7.2" criterion = "0.3.3" serde_json = "1.0" diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 4764a0cac1b14..21b8520c7780f 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -26,8 +26,6 @@ use crate::{ed25519, sr25519}; use base58::{FromBase58, ToBase58}; use codec::{Decode, Encode, MaxEncodedLen}; #[cfg(feature = "std")] -use parking_lot::Mutex; -#[cfg(feature = "std")] use rand::{rngs::OsRng, RngCore}; #[cfg(feature = "std")] use regex::Regex; @@ -38,14 +36,15 @@ pub use secrecy::ExposeSecret; #[cfg(feature = "std")] pub use secrecy::SecretString; use sp_runtime_interface::pass_by::PassByInner; -#[cfg(feature = "std")] -use sp_std::convert::TryInto; #[doc(hidden)] pub use sp_std::ops::Deref; use sp_std::{convert::TryFrom, hash::Hash, str, vec::Vec}; /// Trait to zeroize a memory buffer. pub use zeroize::Zeroize; +#[cfg(feature = "full_crypto")] +pub use ss58_registry::{from_known_address_format, Ss58AddressFormat, Ss58AddressFormatRegistry}; + /// The root phrase for our publicly known keys. pub const DEV_PHRASE: &str = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; @@ -227,7 +226,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { /// A format filterer, can be used to ensure that `from_ss58check` family only decode for /// allowed identifiers. By default just refuses the two reserved identifiers. 
fn format_is_allowed(f: Ss58AddressFormat) -> bool { - !matches!(f, Ss58AddressFormat::Reserved46 | Ss58AddressFormat::Reserved47) + !f.is_reserved() } /// Some if the string is a properly encoded SS58Check address. @@ -235,7 +234,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { fn from_ss58check(s: &str) -> Result { Self::from_ss58check_with_version(s).and_then(|(r, v)| match v { v if !v.is_custom() => Ok(r), - v if v == *DEFAULT_VERSION.lock() => Ok(r), + v if v == default_ss58_version() => Ok(r), _ => Err(PublicError::UnknownVersion), }) } @@ -270,7 +269,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { if data.len() != prefix_len + body_len + CHECKSUM_LEN { return Err(PublicError::BadLength) } - let format = ident.try_into().map_err(|_: ()| PublicError::UnknownVersion)?; + let format = ident.into(); if !Self::format_is_allowed(format) { return Err(PublicError::FormatNotAllowed) } @@ -291,7 +290,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { fn from_string(s: &str) -> Result { Self::from_string_with_version(s).and_then(|(r, v)| match v { v if !v.is_custom() => Ok(r), - v if v == *DEFAULT_VERSION.lock() => Ok(r), + v if v == default_ss58_version() => Ok(r), _ => Err(PublicError::UnknownVersion), }) } @@ -322,7 +321,7 @@ pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { /// Return the ss58-check string for this key. #[cfg(feature = "std")] fn to_ss58check(&self) -> String { - self.to_ss58check_with_version(*DEFAULT_VERSION.lock()) + self.to_ss58check_with_version(default_ss58_version()) } /// Some if the string is a properly encoded SS58Check address, optionally with @@ -355,280 +354,30 @@ fn ss58hash(data: &[u8]) -> blake2_rfc::blake2b::Blake2bResult { context.finalize() } +/// Default prefix number #[cfg(feature = "std")] -lazy_static::lazy_static! 
{ - static ref DEFAULT_VERSION: Mutex - = Mutex::new(Ss58AddressFormat::SubstrateAccount); -} - -#[cfg(feature = "full_crypto")] -macro_rules! ss58_address_format { - ( $( $identifier:tt => ($number:expr, $name:expr, $desc:tt) )* ) => ( - /// A known address (sub)format/network ID for SS58. - #[derive(Copy, Clone, PartialEq, Eq, crate::RuntimeDebug)] - pub enum Ss58AddressFormat { - $(#[doc = $desc] $identifier),*, - /// Use a manually provided numeric value as a standard identifier - Custom(u16), - } - - #[cfg(feature = "std")] - impl std::fmt::Display for Ss58AddressFormat { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - $( - Ss58AddressFormat::$identifier => write!(f, "{}", $name), - )* - Ss58AddressFormat::Custom(x) => write!(f, "{}", x), - } - - } - } - - static ALL_SS58_ADDRESS_FORMATS: [Ss58AddressFormat; 0 $(+ { let _ = $number; 1})*] = [ - $(Ss58AddressFormat::$identifier),*, - ]; - - impl Ss58AddressFormat { - /// names of all address formats - pub fn all_names() -> &'static [&'static str] { - &[ - $($name),*, - ] - } - /// All known address formats. - pub fn all() -> &'static [Ss58AddressFormat] { - &ALL_SS58_ADDRESS_FORMATS - } - - /// Whether the address is custom. - pub fn is_custom(&self) -> bool { - matches!(self, Self::Custom(_)) - } - } - - impl TryFrom for Ss58AddressFormat { - type Error = (); - - fn try_from(x: u8) -> Result { - Ss58AddressFormat::try_from(x as u16) - } - } - - impl From for u16 { - fn from(x: Ss58AddressFormat) -> u16 { - match x { - $(Ss58AddressFormat::$identifier => $number),*, - Ss58AddressFormat::Custom(n) => n, - } - } - } - - impl TryFrom for Ss58AddressFormat { - type Error = (); - - fn try_from(x: u16) -> Result { - match x { - $($number => Ok(Ss58AddressFormat::$identifier)),*, - _ => Ok(Ss58AddressFormat::Custom(x)), - } - } - } - - /// Error encountered while parsing `Ss58AddressFormat` from &'_ str - /// unit struct for now. 
- #[derive(Copy, Clone, PartialEq, Eq, crate::RuntimeDebug)] - pub struct ParseError; - - impl<'a> TryFrom<&'a str> for Ss58AddressFormat { - type Error = ParseError; - - fn try_from(x: &'a str) -> Result { - match x { - $($name => Ok(Ss58AddressFormat::$identifier)),*, - a => a.parse::().map(Ss58AddressFormat::Custom).map_err(|_| ParseError), - } - } - } - - #[cfg(feature = "std")] - impl std::str::FromStr for Ss58AddressFormat { - type Err = ParseError; - - fn from_str(data: &str) -> Result { - Self::try_from(data) - } - } - - #[cfg(feature = "std")] - impl std::fmt::Display for ParseError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "failed to parse network value as u8") - } - } - - #[cfg(feature = "std")] - impl Default for Ss58AddressFormat { - fn default() -> Self { - *DEFAULT_VERSION.lock() - } - } +static DEFAULT_VERSION: core::sync::atomic::AtomicU16 = std::sync::atomic::AtomicU16::new( + from_known_address_format(Ss58AddressFormatRegistry::SubstrateAccount), +); - #[cfg(feature = "std")] - impl From for String { - fn from(x: Ss58AddressFormat) -> String { - x.to_string() - } - } - ) +/// Returns default ss58 format used by the current active process. 
+#[cfg(feature = "std")] +pub fn default_ss58_version() -> Ss58AddressFormat { + DEFAULT_VERSION.load(std::sync::atomic::Ordering::Relaxed).into() } -#[cfg(feature = "full_crypto")] -ss58_address_format!( - PolkadotAccount => - (0, "polkadot", "Polkadot Relay-chain, standard account (*25519).") - BareSr25519 => - (1, "sr25519", "Bare 32-bit Schnorr/Ristretto 25519 (S/R 25519) key.") - KusamaAccount => - (2, "kusama", "Kusama Relay-chain, standard account (*25519).") - BareEd25519 => - (3, "ed25519", "Bare 32-bit Edwards Ed25519 key.") - KatalChainAccount => - (4, "katalchain", "Katal Chain, standard account (*25519).") - PlasmAccount => - (5, "plasm", "Plasm Network, standard account (*25519).") - BifrostAccount => - (6, "bifrost", "Bifrost mainnet, direct checksum, standard account (*25519).") - EdgewareAccount => - (7, "edgeware", "Edgeware mainnet, standard account (*25519).") - KaruraAccount => - (8, "karura", "Acala Karura canary network, standard account (*25519).") - ReynoldsAccount => - (9, "reynolds", "Laminar Reynolds canary network, standard account (*25519).") - AcalaAccount => - (10, "acala", "Acala mainnet, standard account (*25519).") - LaminarAccount => - (11, "laminar", "Laminar mainnet, standard account (*25519).") - PolymathAccount => - (12, "polymath", "Polymath network, standard account (*25519).") - SubstraTeeAccount => - (13, "substratee", "Any SubstraTEE off-chain network private account (*25519).") - TotemAccount => - (14, "totem", "Any Totem Live Accounting network standard account (*25519).") - SynesthesiaAccount => - (15, "synesthesia", "Synesthesia mainnet, standard account (*25519).") - KulupuAccount => - (16, "kulupu", "Kulupu mainnet, standard account (*25519).") - DarkAccount => - (17, "dark", "Dark mainnet, standard account (*25519).") - DarwiniaAccount => - (18, "darwinia", "Darwinia Chain mainnet, standard account (*25519).") - GeekAccount => - (19, "geek", "GeekCash mainnet, standard account (*25519).") - StafiAccount => - (20, 
"stafi", "Stafi mainnet, standard account (*25519).") - DockTestAccount => - (21, "dock-testnet", "Dock testnet, standard account (*25519).") - DockMainAccount => - (22, "dock-mainnet", "Dock mainnet, standard account (*25519).") - ShiftNrg => - (23, "shift", "ShiftNrg mainnet, standard account (*25519).") - ZeroAccount => - (24, "zero", "ZERO mainnet, standard account (*25519).") - AlphavilleAccount => - (25, "alphaville", "ZERO testnet, standard account (*25519).") - JupiterAccount => - (26, "jupiter", "Jupiter testnet, standard account (*25519).") - SubsocialAccount => - (28, "subsocial", "Subsocial network, standard account (*25519).") - DhiwayAccount => - (29, "cord", "Dhiway CORD network, standard account (*25519).") - PhalaAccount => - (30, "phala", "Phala Network, standard account (*25519).") - LitentryAccount => - (31, "litentry", "Litentry Network, standard account (*25519).") - RobonomicsAccount => - (32, "robonomics", "Any Robonomics network standard account (*25519).") - DataHighwayAccount => - (33, "datahighway", "DataHighway mainnet, standard account (*25519).") - AresAccount => - (34, "ares", "Ares Protocol, standard account (*25519).") - ValiuAccount => - (35, "vln", "Valiu Liquidity Network mainnet, standard account (*25519).") - CentrifugeAccount => - (36, "centrifuge", "Centrifuge Chain mainnet, standard account (*25519).") - NodleAccount => - (37, "nodle", "Nodle Chain mainnet, standard account (*25519).") - KiltAccount => - (38, "kilt", "KILT Chain mainnet, standard account (*25519).") - PolimecAccount => - (41, "poli", "Polimec Chain mainnet, standard account (*25519).") - SubstrateAccount => - (42, "substrate", "Any Substrate network, standard account (*25519).") - BareSecp256k1 => - (43, "secp256k1", "Bare ECDSA SECP256k1 key.") - ChainXAccount => - (44, "chainx", "ChainX mainnet, standard account (*25519).") - UniartsAccount => - (45, "uniarts", "UniArts Chain mainnet, standard account (*25519).") - Reserved46 => - (46, "reserved46", 
"Reserved for future use (46).") - Reserved47 => - (47, "reserved47", "Reserved for future use (47).") - NeatcoinAccount => - (48, "neatcoin", "Neatcoin mainnet, standard account (*25519).") - PicassoAccount => - (49, "picasso", "Composable Canary Network, standard account (*25519).") - ComposableAccount => - (50, "composable", "Composable mainnet, standard account (*25519).") - HydraDXAccount => - (63, "hydradx", "HydraDX standard account (*25519).") - AventusAccount => - (65, "aventus", "Aventus Chain mainnet, standard account (*25519).") - CrustAccount => - (66, "crust", "Crust Network, standard account (*25519).") - EquilibriumAccount => - (67, "equilibrium", "Equilibrium Network, standard account (*25519).") - SoraAccount => - (69, "sora", "SORA Network, standard account (*25519).") - ZeitgeistAccount => - (73, "zeitgeist", "Zeitgeist network, standard account (*25519).") - MantaAccount => - (77, "manta", "Manta Network, standard account (*25519).") - CalamariAccount => - (78, "calamari", "Manta Canary Network, standard account (*25519).") - PolkaSmith => - (98, "polkasmith", "PolkaSmith Canary Network, standard account (*25519).") - PolkaFoundry => - (99, "polkafoundry", "PolkaFoundry Network, standard account (*25519).") - OriginTrailAccount => - (101, "origintrail-parachain", "OriginTrail Parachain, ethereumm account (ECDSA).") - HeikoAccount => - (110, "heiko", "Heiko, session key (*25519).") - CloverAccount => - (128, "clover", "Clover Finance, standard account (*25519).") - ParallelAccount => - (172, "parallel", "Parallel, session key (*25519).") - SocialAccount => - (252, "social-network", "Social Network, standard account (*25519).") - Moonbeam => - (1284, "moonbeam", "Moonbeam, session key (*25519).") - Moonriver => - (1285, "moonriver", "Moonriver, session key (*25519).") - BasiliskAccount => - (10041, "basilisk", "Basilisk standard account (*25519).") - - // Note: 16384 and above are reserved. 
-); +/// Returns either the input address format or the default. +#[cfg(feature = "std")] +pub fn unwrap_or_default_ss58_version(network: Option) -> Ss58AddressFormat { + network.unwrap_or_else(default_ss58_version) +} /// Set the default "version" (actually, this is a bit of a misnomer and the version byte is /// typically used not just to encode format/version but also network identity) that is used for -/// encoding and decoding SS58 addresses. If an unknown version is provided then it fails. -/// -/// See `ss58_address_format!` for all current known "versions". +/// encoding and decoding SS58 addresses. #[cfg(feature = "std")] -pub fn set_default_ss58_version(version: Ss58AddressFormat) { - *DEFAULT_VERSION.lock() = version +pub fn set_default_ss58_version(new_default: Ss58AddressFormat) { + DEFAULT_VERSION.store(new_default.into(), std::sync::atomic::Ordering::Relaxed); } #[cfg(feature = "std")] diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index 11e9b9d71d80e..2751a0c40e3e5 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -640,7 +640,10 @@ impl CryptoType for Pair { mod test { use super::*; use crate::{ - crypto::{set_default_ss58_version, PublicError, DEV_PHRASE}, + crypto::{ + set_default_ss58_version, PublicError, Ss58AddressFormat, Ss58AddressFormatRegistry, + DEV_PHRASE, + }, keccak_256, }; use hex_literal::hex; @@ -772,26 +775,24 @@ mod test { #[test] fn ss58check_format_check_works() { - use crate::crypto::Ss58AddressFormat; let pair = Pair::from_seed(b"12345678901234567890123456789012"); let public = pair.public(); - let format = Ss58AddressFormat::Reserved46; + let format = Ss58AddressFormatRegistry::Reserved46Account.into(); let s = public.to_ss58check_with_version(format); assert_eq!(Public::from_ss58check_with_version(&s), Err(PublicError::FormatNotAllowed)); } #[test] fn ss58check_full_roundtrip_works() { - use crate::crypto::Ss58AddressFormat; let pair = 
Pair::from_seed(b"12345678901234567890123456789012"); let public = pair.public(); - let format = Ss58AddressFormat::PolkadotAccount; + let format = Ss58AddressFormatRegistry::PolkadotAccount.into(); let s = public.to_ss58check_with_version(format); let (k, f) = Public::from_ss58check_with_version(&s).unwrap(); assert_eq!(k, public); assert_eq!(f, format); - let format = Ss58AddressFormat::Custom(64); + let format = Ss58AddressFormat::custom(64); let s = public.to_ss58check_with_version(format); let (k, f) = Public::from_ss58check_with_version(&s).unwrap(); assert_eq!(k, public); @@ -805,10 +806,10 @@ mod test { if std::env::var("RUN_CUSTOM_FORMAT_TEST") == Ok("1".into()) { use crate::crypto::Ss58AddressFormat; // temp save default format version - let default_format = Ss58AddressFormat::default(); + let default_format = crate::crypto::default_ss58_version(); // set current ss58 version is custom "200" `Ss58AddressFormat::Custom(200)` - set_default_ss58_version(Ss58AddressFormat::Custom(200)); + set_default_ss58_version(Ss58AddressFormat::custom(200)); // custom addr encoded by version 200 let addr = "4pbsSkWcBaYoFHrKJZp5fDVUKbqSYD9dhZZGvpp3vQ5ysVs5ybV"; Public::from_ss58check(&addr).unwrap(); diff --git a/primitives/database/Cargo.toml b/primitives/database/Cargo.toml index c99651d4ef042..c3d2d8ce99df9 100644 --- a/primitives/database/Cargo.toml +++ b/primitives/database/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate database trait." 
documentation = "https://docs.rs/sp-database" diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index 0d3ba805100c4..954d0f89663fe 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -4,7 +4,7 @@ version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Macros to derive runtime debug implementation." documentation = "https://docs.rs/sp-debug-derive" @@ -17,11 +17,17 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.3" -syn = "1.0.58" +quote = "1.0.10" +syn = "1.0.80" proc-macro2 = "1.0" [features] +default = [ "std" ] std = [] +# By default `RuntimeDebug` implements `Debug` that outputs `` when `std` is +# disabled. However, sometimes downstream users need to have the real `Debug` implementation for +# debugging purposes. If this is required, a user only needs to add this crate as a dependency of +# their runtime and enable the `force-debug` feature. 
+force-debug = [] [dev-dependencies] diff --git a/primitives/debug-derive/src/impls.rs b/primitives/debug-derive/src/impls.rs index 4d79ee9880160..0e67ef81bc853 100644 --- a/primitives/debug-derive/src/impls.rs +++ b/primitives/debug-derive/src/impls.rs @@ -43,7 +43,7 @@ pub fn debug_derive(ast: DeriveInput) -> proc_macro::TokenStream { gen.into() } -#[cfg(not(feature = "std"))] +#[cfg(all(not(feature = "std"), not(feature = "force-debug")))] mod implementation { use super::*; @@ -58,7 +58,7 @@ mod implementation { } } -#[cfg(feature = "std")] +#[cfg(any(feature = "std", feature = "force-debug"))] mod implementation { use super::*; use proc_macro2::Span; diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index 52a6300688cd9..5a1b6b5e73734 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate externalities abstraction" documentation = "https://docs.rs/sp-externalities" diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index c0c2a654270f7..5cf7ac6711a70 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Primitives for GRANDPA integration, suitable for WASM compilation." 
documentation = "https://docs.rs/sp-finality-grandpa" diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index 23558750b5cf8..04b662fb059f0 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Provides types and traits for creating and checking inherents." documentation = "https://docs.rs/sp-inherents" diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index d3a2b56705926..df9a496a914be 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "I/O for Substrate runtimes" documentation = "https://docs.rs/sp-io" @@ -24,14 +24,13 @@ libsecp256k1 = { version = "0.6", optional = true } sp-state-machine = { version = "0.10.0-dev", optional = true, path = "../state-machine" } sp-wasm-interface = { version = "4.0.0-dev", path = "../wasm-interface", default-features = false } sp-runtime-interface = { version = "4.0.0-dev", default-features = false, path = "../runtime-interface" } -sp-maybe-compressed-blob = { version = "4.0.0-dev", optional = true, path = "../maybe-compressed-blob" } sp-trie = { version = "4.0.0-dev", optional = true, path = "../trie" } sp-externalities = { version = "0.10.0-dev", optional = true, path = "../externalities" } sp-tracing = { version = "4.0.0-dev", default-features = false, path = "../tracing" } log = { version = "0.4.8", optional = true } futures = { version = "0.3.1", features = ["thread-pool"], optional = true } parking_lot = { version = "0.11.1", optional = 
true } -tracing = { version = "0.1.25", default-features = false } +tracing = { version = "0.1.29", default-features = false } tracing-core = { version = "0.1.17", default-features = false} [features] @@ -48,7 +47,6 @@ std = [ "sp-runtime-interface/std", "sp-externalities", "sp-wasm-interface/std", - "sp-maybe-compressed-blob", "sp-tracing/std", "tracing/std", "tracing-core/std", diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 5faeb59c72db6..78e6f0c847952 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -73,6 +73,7 @@ mod batch_verifier; #[cfg(feature = "std")] use batch_verifier::BatchVerifier; +#[cfg(feature = "std")] const LOG_TARGET: &str = "runtime::io"; /// Error verifying ECDSA signature @@ -1481,21 +1482,17 @@ mod allocator_impl { #[panic_handler] #[no_mangle] pub fn panic(info: &core::panic::PanicInfo) -> ! { - unsafe { - let message = sp_std::alloc::format!("{}", info); - logging::log(LogLevel::Error, "runtime", message.as_bytes()); - core::arch::wasm32::unreachable(); - } + let message = sp_std::alloc::format!("{}", info); + logging::log(LogLevel::Error, "runtime", message.as_bytes()); + core::arch::wasm32::unreachable(); } /// A default OOM handler for WASM environment. #[cfg(all(not(feature = "disable_oom"), not(feature = "std")))] #[alloc_error_handler] pub fn oom(_: core::alloc::Layout) -> ! { - unsafe { - logging::log(LogLevel::Error, "runtime", b"Runtime memory exhausted. Aborting"); - core::arch::wasm32::unreachable(); - } + logging::log(LogLevel::Error, "runtime", b"Runtime memory exhausted. Aborting"); + core::arch::wasm32::unreachable(); } /// Type alias for Externalities implementation used in tests. 
diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index a14e98d3d8059..464abdb6cb1aa 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Keyring support code for the runtime. A set of test accounts." documentation = "https://docs.rs/sp-keyring" diff --git a/primitives/keyring/src/sr25519.rs b/primitives/keyring/src/sr25519.rs index 6a7aa3635a43a..604c330b1ea1b 100644 --- a/primitives/keyring/src/sr25519.rs +++ b/primitives/keyring/src/sr25519.rs @@ -89,9 +89,20 @@ impl Keyring { pub fn public(self) -> Public { self.pair().public() } + pub fn to_seed(self) -> String { format!("//{}", self) } + + /// Create a crypto `Pair` from a numeric value. + pub fn numeric(idx: usize) -> Pair { + Pair::from_string(&format!("//{}", idx), None).expect("numeric values are known good; qed") + } + + /// Get account id of a `numeric` account. + pub fn numeric_id(idx: usize) -> AccountId32 { + (*Self::numeric(idx).public().as_array_ref()).into() + } } impl From for &'static str { diff --git a/primitives/keystore/Cargo.toml b/primitives/keystore/Cargo.toml index 35c66ef93f7aa..3a0532f1db313 100644 --- a/primitives/keystore/Cargo.toml +++ b/primitives/keystore/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Keystore primitives." 
documentation = "https://docs.rs/sp-core" diff --git a/primitives/maybe-compressed-blob/Cargo.toml b/primitives/maybe-compressed-blob/Cargo.toml index 8d47c89ea8ebe..cd124580ef19c 100644 --- a/primitives/maybe-compressed-blob/Cargo.toml +++ b/primitives/maybe-compressed-blob/Cargo.toml @@ -4,11 +4,11 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Handling of blobs, usually Wasm code, which may be compresed" documentation = "https://docs.rs/sp-maybe-compressed-blob" readme = "README.md" [dependencies] -zstd = { version = "0.6.0", default-features = false } +zstd = { version = "0.9.0", default-features = false } diff --git a/primitives/npos-elections/Cargo.toml b/primitives/npos-elections/Cargo.toml index b277df8f58f12..5ffaf76379bda 100644 --- a/primitives/npos-elections/Cargo.toml +++ b/primitives/npos-elections/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "NPoS election algorithm primitives" readme = "README.md" diff --git a/primitives/npos-elections/fuzzer/Cargo.toml b/primitives/npos-elections/fuzzer/Cargo.toml index d6fcc09c8b586..1d13d33a35e80 100644 --- a/primitives/npos-elections/fuzzer/Cargo.toml +++ b/primitives/npos-elections/fuzzer/Cargo.toml @@ -4,7 +4,7 @@ version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Fuzzer for phragmén implementation." 
documentation = "https://docs.rs/sp-npos-elections-fuzzer" diff --git a/primitives/npos-elections/solution-type/Cargo.toml b/primitives/npos-elections/solution-type/Cargo.toml index cbe6750266f01..27d5d0bb1231c 100644 --- a/primitives/npos-elections/solution-type/Cargo.toml +++ b/primitives/npos-elections/solution-type/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "NPoS Solution Type" @@ -15,13 +15,13 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.58", features = ["full", "visit"] } +syn = { version = "1.0.80", features = ["full", "visit"] } quote = "1.0" proc-macro2 = "1.0.29" proc-macro-crate = "1.0.0" [dev-dependencies] -parity-scale-codec = "2.0.1" +parity-scale-codec = "2.3.1" scale-info = "1.0" sp-arithmetic = { path = "../../arithmetic", version = "4.0.0-dev" } # used by generate_solution_type: diff --git a/primitives/offchain/Cargo.toml b/primitives/offchain/Cargo.toml index dd54147b6c629..c1e891acba955 100644 --- a/primitives/offchain/Cargo.toml +++ b/primitives/offchain/Cargo.toml @@ -5,7 +5,7 @@ version = "4.0.0-dev" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index ad03baca24ebb..c961d5b089abb 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -4,7 +4,7 @@ version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description 
= "Custom panic hook with bug report link" documentation = "https://docs.rs/sp-panic-handler" diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index 8e1b91a9acb21..af883e2199415 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate RPC primitives and utilities." readme = "README.md" diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index dd1b84eabfe93..fc8923cdb80a3 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate runtime interface" documentation = "https://docs.rs/sp-runtime-interface/" diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index 1eb3bdd9039d9..dd08d03313396 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "This crate provides procedural macros for usage within the context of the Substrate runtime interface." 
documentation = "https://docs.rs/sp-runtime-interface-proc-macro" @@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -syn = { version = "1.0.58", features = ["full", "visit", "fold", "extra-traits"] } -quote = "1.0.3" +syn = { version = "1.0.80", features = ["full", "visit", "fold", "extra-traits"] } +quote = "1.0.10" proc-macro2 = "1.0.29" Inflector = "0.11.4" proc-macro-crate = "1.0.0" diff --git a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml index 3ae5d78b0ef95..a3c82de473abd 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml +++ b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" publish = false diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index 7c7d3e10b2d0c..557b5b9bee89f 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" publish = false diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index 377729521fcfe..26884d5cb729f 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" publish = false -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = 
"https://github.com/paritytech/substrate/" [package.metadata.docs.rs] @@ -20,5 +20,5 @@ sp-runtime-interface-test-wasm-deprecated = { version = "2.0.0", path = "../test sp-state-machine = { version = "0.10.0-dev", path = "../../state-machine" } sp-runtime = { version = "4.0.0-dev", path = "../../runtime" } sp-io = { version = "4.0.0-dev", path = "../../io" } -tracing = "0.1.25" +tracing = "0.1.29" tracing-core = "0.1.17" diff --git a/primitives/runtime-interface/tests/ui/pass_by_enum_with_struct.stderr b/primitives/runtime-interface/tests/ui/pass_by_enum_with_struct.stderr index c7ed1af3b1a03..44fb5a244e03d 100644 --- a/primitives/runtime-interface/tests/ui/pass_by_enum_with_struct.stderr +++ b/primitives/runtime-interface/tests/ui/pass_by_enum_with_struct.stderr @@ -4,4 +4,4 @@ error: `PassByEnum` only supports enums as input type. 3 | #[derive(PassByEnum)] | ^^^^^^^^^^ | - = note: this error originates in a derive macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `PassByEnum` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/runtime-interface/tests/ui/pass_by_enum_with_value_variant.stderr b/primitives/runtime-interface/tests/ui/pass_by_enum_with_value_variant.stderr index f6c85ed2bba3e..633dc3bbe8bc4 100644 --- a/primitives/runtime-interface/tests/ui/pass_by_enum_with_value_variant.stderr +++ b/primitives/runtime-interface/tests/ui/pass_by_enum_with_value_variant.stderr @@ -4,4 +4,4 @@ error: `PassByEnum` only supports unit variants. 
3 | #[derive(PassByEnum)] | ^^^^^^^^^^ | - = note: this error originates in a derive macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `PassByEnum` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/runtime-interface/tests/ui/pass_by_inner_with_two_fields.stderr b/primitives/runtime-interface/tests/ui/pass_by_inner_with_two_fields.stderr index 9afbce76f0c23..0ffee00210e79 100644 --- a/primitives/runtime-interface/tests/ui/pass_by_inner_with_two_fields.stderr +++ b/primitives/runtime-interface/tests/ui/pass_by_inner_with_two_fields.stderr @@ -4,4 +4,4 @@ error: Only newtype/one field structs are supported by `PassByInner`! 3 | #[derive(PassByInner)] | ^^^^^^^^^^^ | - = note: this error originates in a derive macro (in Nightly builds, run with -Z macro-backtrace for more info) + = note: this error originates in the derive macro `PassByInner` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 5ac5bcf1963e0..475d2b769de39 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Runtime Modules shared primitive types." documentation = "https://docs.rs/sp-runtime" diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 65c063fde1696..6d79d740dc4e1 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -1474,6 +1474,7 @@ macro_rules! impl_opaque_keys { #[macro_export] #[cfg(not(feature = "std"))] +#[doc(hidden)] macro_rules! 
impl_opaque_keys { { $( #[ $attr:meta ] )* diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index a4d4a4d5d031a..80cd195c6f406 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "This crate provides means to instantiate and execute wasm modules." readme = "README.md" diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index 2200274e0628d..cc438e9a1c148 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -4,7 +4,7 @@ version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate customizable serde serializer." 
documentation = "https://docs.rs/sp-serializer" diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml index 8e1e2464e49ec..1e9ed6ec9b651 100644 --- a/primitives/session/Cargo.toml +++ b/primitives/session/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Primitives for sessions" readme = "README.md" diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index 9e852319ede42..8ea24760e2b8b 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "A crate which contains primitives that are useful for implementation that uses staking approaches in general. Definitions related to sessions, slashing, etc go here." 
readme = "README.md" diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 457bbac5d2640..bbe9728befd80 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Substrate State Machine" edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sp-state-machine" readme = "README.md" @@ -27,15 +27,15 @@ codec = { package = "parity-scale-codec", version = "2.0.0", default-features = num-traits = { version = "0.2.8", default-features = false } rand = { version = "0.7.2", optional = true } sp-externalities = { version = "0.10.0-dev", path = "../externalities", default-features = false } -smallvec = "1.4.1" +smallvec = "1.7.0" sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } -tracing = { version = "0.1.22", optional = true } +tracing = { version = "0.1.29", optional = true } [dev-dependencies] -hex-literal = "0.3.1" +hex-literal = "0.3.3" sp-runtime = { version = "4.0.0-dev", path = "../runtime" } -pretty_assertions = "0.6.1" -rand = { version = "0.7.2" } +pretty_assertions = "1.0.0" +rand = "0.7.2" [features] default = ["std"] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 1b1a732f8d0fc..7dcf92b06de06 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -21,9 +21,9 @@ use crate::{ trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, ChildStorageCollection, StorageCollection, StorageKey, StorageValue, UsageInfo, }; -use codec::{Decode, Encode}; +use codec::Encode; use hash_db::Hasher; -use sp_core::storage::{well_known_keys, ChildInfo, TrackedStorageKey}; +use sp_core::storage::{ChildInfo, TrackedStorageKey}; #[cfg(feature = "std")] use 
sp_core::traits::RuntimeCode; use sp_std::vec::Vec; @@ -330,7 +330,11 @@ impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode for BackendRuntimeCode<'a, B, H> { fn fetch_runtime_code<'b>(&'b self) -> Option> { - self.backend.storage(well_known_keys::CODE).ok().flatten().map(Into::into) + self.backend + .storage(sp_core::storage::well_known_keys::CODE) + .ok() + .flatten() + .map(Into::into) } } @@ -348,17 +352,17 @@ where pub fn runtime_code(&self) -> Result { let hash = self .backend - .storage_hash(well_known_keys::CODE) + .storage_hash(sp_core::storage::well_known_keys::CODE) .ok() .flatten() .ok_or("`:code` hash not found")? .encode(); let heap_pages = self .backend - .storage(well_known_keys::HEAP_PAGES) + .storage(sp_core::storage::well_known_keys::HEAP_PAGES) .ok() .flatten() - .and_then(|d| Decode::decode(&mut &d[..]).ok()); + .and_then(|d| codec::Decode::decode(&mut &d[..]).ok()); Ok(RuntimeCode { code_fetcher: self, hash, heap_pages }) } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index c9693ca6a88c1..c20d8492fb1f3 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -17,17 +17,15 @@ //! Concrete externalities implementation. 
-use crate::{ - backend::Backend, overlayed_changes::OverlayedExtensions, IndexOperation, OverlayedChanges, - StorageKey, StorageValue, -}; +#[cfg(feature = "std")] +use crate::overlayed_changes::OverlayedExtensions; +use crate::{backend::Backend, IndexOperation, OverlayedChanges, StorageKey, StorageValue}; use codec::{Decode, Encode, EncodeAppend}; use hash_db::Hasher; -use sp_core::{ - hexdisplay::HexDisplay, - storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}, -}; -use sp_externalities::{Extension, ExtensionStore, Extensions, Externalities}; +#[cfg(feature = "std")] +use sp_core::hexdisplay::HexDisplay; +use sp_core::storage::{well_known_keys::is_child_storage_key, ChildInfo, TrackedStorageKey}; +use sp_externalities::{Extension, ExtensionStore, Externalities}; use sp_trie::{empty_child_trie_root, trie_types::Layout}; #[cfg(feature = "std")] @@ -37,7 +35,7 @@ use sp_std::{ any::{Any, TypeId}, boxed::Box, cmp::Ordering, - fmt, vec, + vec, vec::Vec, }; #[cfg(feature = "std")] @@ -72,8 +70,8 @@ pub enum Error { } #[cfg(feature = "std")] -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { +impl std::fmt::Display for Error { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match *self { Error::Backend(ref e) => write!(f, "Storage backend error: {}", e), Error::Executor(ref e) => write!(f, "Sub-call execution error: {}", e), @@ -139,7 +137,7 @@ where storage_transaction_cache: &'a mut StorageTransactionCache, backend: &'a B, changes_trie_state: Option>, - extensions: Option<&'a mut Extensions>, + extensions: Option<&'a mut sp_externalities::Extensions>, ) -> Self { Self { overlay, diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 05d2c6d20ccee..7bd0c645f3c00 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -55,11 +55,19 @@ pub use tracing::trace; #[cfg(not(feature = "std"))] #[macro_export] 
macro_rules! warn { - (target: $target:expr, $($arg:tt)+) => { - () + (target: $target:expr, $message:expr $( , $arg:ident )* $( , )?) => { + { + $( + let _ = &$arg; + )* + } }; - ($($arg:tt)+) => { - () + ($message:expr, $( $arg:expr, )*) => { + { + $( + let _ = &$arg; + )* + } }; } @@ -68,11 +76,12 @@ macro_rules! warn { #[cfg(not(feature = "std"))] #[macro_export] macro_rules! debug { - (target: $target:expr, $($arg:tt)+) => { - () - }; - ($($arg:tt)+) => { - () + (target: $target:expr, $message:expr $( , $arg:ident )* $( , )?) => { + { + $( + let _ = &$arg; + )* + } }; } diff --git a/primitives/state-machine/src/overlayed_changes/mod.rs b/primitives/state-machine/src/overlayed_changes/mod.rs index a0558e06a380e..cf7af1c9a6f3a 100644 --- a/primitives/state-machine/src/overlayed_changes/mod.rs +++ b/primitives/state-machine/src/overlayed_changes/mod.rs @@ -21,15 +21,7 @@ mod changeset; mod offchain; use self::changeset::OverlayedChangeSet; -use crate::{backend::Backend, stats::StateMachineStats}; -pub use offchain::OffchainOverlayedChanges; -use sp_std::{ - any::{Any, TypeId}, - boxed::Box, - vec::Vec, -}; - -use crate::{changes_trie::BlockNumber, DefaultError}; +use crate::{backend::Backend, changes_trie::BlockNumber, stats::StateMachineStats, DefaultError}; #[cfg(feature = "std")] use crate::{ changes_trie::{build_changes_trie, State as ChangesTrieState}, @@ -37,16 +29,23 @@ use crate::{ }; use codec::{Decode, Encode}; use hash_db::Hasher; +pub use offchain::OffchainOverlayedChanges; use sp_core::{ offchain::OffchainOverlayedChange, storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo}, }; +#[cfg(feature = "std")] use sp_externalities::{Extension, Extensions}; #[cfg(not(feature = "std"))] -use sp_std::collections::btree_map::{BTreeMap as Map, Entry as MapEntry}; -use sp_std::collections::btree_set::BTreeSet; +use sp_std::collections::btree_map::BTreeMap as Map; +use sp_std::{collections::btree_set::BTreeSet, vec::Vec}; #[cfg(feature = "std")] use 
std::collections::{hash_map::Entry as MapEntry, HashMap as Map}; +#[cfg(feature = "std")] +use std::{ + any::{Any, TypeId}, + boxed::Box, +}; pub use self::changeset::{AlreadyInRuntime, NoOpenTransaction, NotInRuntime, OverlayedValue}; @@ -581,6 +580,8 @@ impl OverlayedChanges { self.changes_trie_root(backend, changes_trie_state, parent_hash, false, &mut cache) .map_err(|_| "Failed to generate changes trie transaction")?; } + #[cfg(not(feature = "std"))] + let _ = parent_hash; #[cfg(feature = "std")] let changes_trie_transaction = cache @@ -758,6 +759,7 @@ where /// An overlayed extension is either a mutable reference /// or an owned extension. +#[cfg(feature = "std")] pub enum OverlayedExtension<'a> { MutRef(&'a mut Box), Owned(Box), @@ -770,10 +772,12 @@ pub enum OverlayedExtension<'a> { /// as owned references. After the execution of a runtime function, we /// can safely drop this object while not having modified the original /// list. +#[cfg(feature = "std")] pub struct OverlayedExtensions<'a> { extensions: Map>, } +#[cfg(feature = "std")] impl<'a> OverlayedExtensions<'a> { /// Create a new instance of overalyed extensions from the given extensions. pub fn new(extensions: &'a mut Extensions) -> Self { diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 557a098fbaf79..6c575f0d76bc7 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -24,7 +24,7 @@ use hash_db::{self, Hasher, Prefix}; #[cfg(feature = "std")] use parking_lot::RwLock; use sp_core::storage::ChildInfo; -use sp_std::{boxed::Box, ops::Deref, vec::Vec}; +use sp_std::{boxed::Box, vec::Vec}; use sp_trie::{ empty_child_trie_root, read_child_trie_value, read_trie_value, trie_types::{Layout, TrieDB, TrieError}, @@ -37,8 +37,11 @@ use std::sync::Arc; #[cfg(not(feature = "std"))] macro_rules! 
format { - ($($arg:tt)+) => { - crate::DefaultError + ( $message:expr, $( $arg:expr )* ) => { + { + $( let _ = &$arg; )* + crate::DefaultError + } }; } @@ -488,7 +491,7 @@ impl TrieBackendStorage for Arc> { type Overlay = PrefixedMemoryDB; fn get(&self, key: &H::Out, prefix: Prefix) -> Result> { - Storage::::get(self.deref(), key, prefix) + Storage::::get(std::ops::Deref::deref(self), key, prefix) } } diff --git a/primitives/std/Cargo.toml b/primitives/std/Cargo.toml index bf815c1c80c56..e4cacf60cc36f 100644 --- a/primitives/std/Cargo.toml +++ b/primitives/std/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Lowest-abstraction level for the Substrate runtime: just exports useful primitives from std or client/alloc to be used with any code that depends on the runtime." diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 1a05fb9969197..c7d23fcf70103 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" description = "Storage related primitives" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sp-storage/" readme = "README.md" @@ -18,9 +18,9 @@ sp-std = { version = "4.0.0-dev", default-features = false, path = "../std" } serde = { version = "1.0.126", optional = true, features = ["derive"] } impl-serde = { version = "0.3.1", optional = true } ref-cast = "1.0.0" -sp-debug-derive = { version = "3.0.0", path = "../debug-derive" } +sp-debug-derive = { version = "3.0.0", default-features = false, path = "../debug-derive" } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, 
features = ["derive"] } [features] default = [ "std" ] -std = [ "sp-std/std", "serde", "impl-serde", "codec/std" ] +std = [ "sp-std/std", "serde", "impl-serde", "codec/std", "sp-debug-derive/std" ] diff --git a/primitives/tasks/Cargo.toml b/primitives/tasks/Cargo.toml index ee503ae9b855f..f91d15d2d40b3 100644 --- a/primitives/tasks/Cargo.toml +++ b/primitives/tasks/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Runtime asynchronous, pure computational tasks" documentation = "https://docs.rs/sp-tasks" diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 5aed5d679dd49..75ce8b752d3ca 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -4,7 +4,7 @@ version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" publish = false diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 60daf9642df6b..32f4c53083435 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate core types and inherents for timestamps." 
readme = "README.md" diff --git a/primitives/tracing/Cargo.toml b/primitives/tracing/Cargo.toml index 3be09dcd576df..85eb22d6df072 100644 --- a/primitives/tracing/Cargo.toml +++ b/primitives/tracing/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Instrumentation primitives and macros for Substrate." readme = "README.md" @@ -22,17 +22,11 @@ sp-std = { version = "4.0.0-dev", path = "../std", default-features = false } codec = { version = "2.0.0", package = "parity-scale-codec", default-features = false, features = [ "derive", ] } -tracing = { version = "0.1.25", default-features = false } +tracing = { version = "0.1.29", default-features = false } tracing-core = { version = "0.1.17", default-features = false } -log = { version = "0.4.8", optional = true } tracing-subscriber = { version = "0.2.19", optional = true, features = [ "tracing-log", ] } -parking_lot = { version = "0.10.0", optional = true } -erased-serde = { version = "0.3.9", optional = true } -serde = { version = "1.0.126", optional = true } -serde_json = { version = "1.0.68", optional = true } -slog = { version = "2.5.2", features = ["nested-values"], optional = true } [features] default = ["std"] @@ -43,11 +37,5 @@ std = [ "tracing-core/std", "codec/std", "sp-std/std", - "log", "tracing-subscriber", - "parking_lot", - "erased-serde", - "serde", - "serde_json", - "slog", ] diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index 3f77014ac53b0..f74fcb44fee2d 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = 
"https://github.com/paritytech/substrate/" description = "Transaction pool runtime facing API." documentation = "https://docs.rs/sp-transaction-pool" diff --git a/primitives/transaction-storage-proof/Cargo.toml b/primitives/transaction-storage-proof/Cargo.toml index 8a41105b20b74..536e2f201a104 100644 --- a/primitives/transaction-storage-proof/Cargo.toml +++ b/primitives/transaction-storage-proof/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] description = "Transaction storage proof primitives" edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 5a2de4f16f9a4..66d8a1e47276e 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -6,7 +6,7 @@ description = "Patricia trie stuff using a parity-scale-codec node format" repository = "https://github.com/paritytech/substrate/" license = "Apache-2.0" edition = "2018" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" documentation = "https://docs.rs/sp-trie" readme = "README.md" @@ -31,7 +31,7 @@ sp-core = { version = "4.0.0-dev", default-features = false, path = "../core" } trie-bench = "0.28.0" trie-standardmap = "0.15.2" criterion = "0.3.3" -hex-literal = "0.3.1" +hex-literal = "0.3.3" sp-runtime = { version = "4.0.0-dev", path = "../runtime" } [features] diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index fcab1eeabcaf4..2a2c2698c74c3 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Version module for the Substrate runtime; Provides a function that 
returns the runtime version." documentation = "https://docs.rs/sp-version" diff --git a/primitives/version/proc-macro/Cargo.toml b/primitives/version/proc-macro/Cargo.toml index c3c801431434a..587ca06bdc179 100644 --- a/primitives/version/proc-macro/Cargo.toml +++ b/primitives/version/proc-macro/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Macro for defining a runtime version." documentation = "https://docs.rs/sp-api-proc-macro" @@ -16,8 +16,8 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.3" -syn = { version = "1.0.58", features = ["full", "fold", "extra-traits", "visit"] } +quote = "1.0.10" +syn = { version = "1.0.80", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.29" codec = { package = "parity-scale-codec", version = "2.0.0", features = [ "derive" ] } diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index ba8a7b4e4b466..73b47e563a5b5 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Types and traits for interfacing between the host and the wasm runtime." 
documentation = "https://docs.rs/sp-wasm-interface" diff --git a/shell.nix b/shell.nix index 9a2d30400631f..a86af005383f7 100644 --- a/shell.nix +++ b/shell.nix @@ -5,7 +5,7 @@ let rev = "4a07484cf0e49047f82d83fd119acffbad3b235f"; }); nixpkgs = import { overlays = [ mozillaOverlay ]; }; - rust-nightly = with nixpkgs; ((rustChannelOf { date = "2021-07-06"; channel = "nightly"; }).rust.override { + rust-nightly = with nixpkgs; ((rustChannelOf { date = "2021-09-10"; channel = "nightly"; }).rust.override { extensions = [ "rust-src" ]; targets = [ "wasm32-unknown-unknown" ]; }); diff --git a/ss58-registry.json b/ss58-registry.json index 563cc248db9dd..fdae23a5a6f21 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -386,7 +386,7 @@ "symbols": null, "decimals": null, "standardAccount": "*25519", - "website": "https://substrate.dev/" + "website": "https://docs.substrate.io/" }, { "prefix": 43, @@ -532,6 +532,15 @@ "standardAccount": "*25519", "website": "https://manta.network" }, + { + "prefix": 88, + "network": "polkadex", + "displayName": "Polkadex Mainnet", + "symbols": ["PDEX"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://polkadex.trade" + }, { "prefix": 98, "network": "polkasmith", @@ -631,6 +640,15 @@ "standardAccount": "secp256k1", "website": "https://moonbeam.network" }, + { + "prefix": 2349, + "network": "automata", + "displayName": "Automata Mainnet", + "symbols": ["ATA"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://ata.network" + }, { "prefix": 10041, "network": "basilisk", @@ -639,6 +657,15 @@ "decimals": [12], "standardAccount": "*25519", "website": "https://bsx.fi" + }, + { + "prefix": 11820, + "network": "contextfree", + "displayName": "Automata ContextFree", + "symbols": ["CTX"], + "decimals": [18], + "standardAccount": "*25519", + "website": "https://ata.network" } ] } diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 4eed6e5e29133..a9ffefa05df7f 100644 --- 
a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate test utilities" diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 34238872cad84..204b6ac435e07 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -4,7 +4,7 @@ version = "2.0.1" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" publish = false diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml index 545e8cf332618..2a1f52346840f 100644 --- a/test-utils/derive/Cargo.toml +++ b/test-utils/derive/Cargo.toml @@ -4,13 +4,13 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate test utilities macros" [dependencies] -quote = "1.0.6" -syn = { version = "1.0.58", features = ["full"] } +quote = "1.0.10" +syn = { version = "1.0.80", features = ["full"] } proc-macro-crate = "1.0.0" proc-macro2 = "1.0.29" diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 24f4d404c18bd..eb6ca51ce2e5a 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" publish = false diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 
3561697042f2a..75ebb8f23326c 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -4,7 +4,7 @@ version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" publish = false diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index dc5ccadc4574f..bcfe93b6f7975 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -37,7 +37,7 @@ use sc_client_api::light::{ use sp_core::{ sr25519, storage::{ChildInfo, Storage, StorageChild}, - ChangesTrieConfiguration, + ChangesTrieConfiguration, Pair, }; use sp_runtime::traits::{Block as BlockT, Hash as HashT, HashFor, Header as HeaderT, NumberFor}; use substrate_test_runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; @@ -118,11 +118,15 @@ impl GenesisParameters { sr25519::Public::from(Sr25519Keyring::Bob).into(), sr25519::Public::from(Sr25519Keyring::Charlie).into(), ], - vec![ - AccountKeyring::Alice.into(), - AccountKeyring::Bob.into(), - AccountKeyring::Charlie.into(), - ], + (0..16_usize) + .into_iter() + .map(|i| AccountKeyring::numeric(i).public()) + .chain(vec![ + AccountKeyring::Alice.into(), + AccountKeyring::Bob.into(), + AccountKeyring::Charlie.into(), + ]) + .collect(), 1000, self.heap_pages_override, self.extra_storage.clone(), diff --git a/test-utils/runtime/client/src/trait_tests.rs b/test-utils/runtime/client/src/trait_tests.rs index c5e0ba49fcf5b..938aeda36d319 100644 --- a/test-utils/runtime/client/src/trait_tests.rs +++ b/test-utils/runtime/client/src/trait_tests.rs @@ -67,7 +67,6 @@ where .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); - #[allow(deprecated)] assert_eq!(blockchain.leaves().unwrap(), vec![a2.hash()]); // A2 -> A3 diff --git a/test-utils/runtime/src/lib.rs 
b/test-utils/runtime/src/lib.rs index 0d880d508ef38..943c41c247f75 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -36,7 +36,11 @@ use sp_trie::{ use trie_db::{Trie, TrieMut}; use cfg_if::cfg_if; -use frame_support::{parameter_types, traits::KeyOwnerProofSystem, weights::RuntimeDbWeight}; +use frame_support::{ + parameter_types, + traits::{CrateVersion, KeyOwnerProofSystem}, + weights::RuntimeDbWeight, +}; use frame_system::limits::{BlockLength, BlockWeights}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_core::hash::H256; @@ -520,6 +524,35 @@ impl frame_support::traits::PalletInfo for Runtime { return Some("Babe") } + None + } + fn module_name() -> Option<&'static str> { + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::>() { + return Some("system") + } + if type_id == sp_std::any::TypeId::of::>() { + return Some("pallet_timestamp") + } + if type_id == sp_std::any::TypeId::of::>() { + return Some("pallet_babe") + } + + None + } + fn crate_version() -> Option { + use frame_support::traits::PalletInfoAccess as _; + let type_id = sp_std::any::TypeId::of::

(); + if type_id == sp_std::any::TypeId::of::>() { + return Some(system::Pallet::::crate_version()) + } + if type_id == sp_std::any::TypeId::of::>() { + return Some(pallet_timestamp::Pallet::::crate_version()) + } + if type_id == sp_std::any::TypeId::of::>() { + return Some(pallet_babe::Pallet::::crate_version()) + } + None } } @@ -574,6 +607,7 @@ impl pallet_timestamp::Config for Runtime { parameter_types! { pub const EpochDuration: u64 = 6; pub const ExpectedBlockTime: u64 = 10_000; + pub const MaxAuthorities: u32 = 10; } impl pallet_babe::Config for Runtime { @@ -596,8 +630,9 @@ impl pallet_babe::Config for Runtime { )>>::IdentificationTuple; type HandleEquivocation = (); - type WeightInfo = (); + + type MaxAuthorities = MaxAuthorities; } /// Adds one to the given input and returns the final result. diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index 09839ebae6ffe..5a2983b058b04 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -4,7 +4,7 @@ version = "2.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" publish = false diff --git a/test-utils/test-crate/Cargo.toml b/test-utils/test-crate/Cargo.toml index fff39c3964ad8..9c9672fe8f5d8 100644 --- a/test-utils/test-crate/Cargo.toml +++ b/test-utils/test-crate/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" publish = false diff --git a/test-utils/test-runner/src/utils.rs b/test-utils/test-runner/src/utils.rs index 8e8c84e6b4f8a..a4c45c25f969b 100644 --- a/test-utils/test-runner/src/utils.rs +++ b/test-utils/test-runner/src/utils.rs @@ -94,6 
+94,7 @@ pub fn default_config(tokio_handle: Handle, mut chain_spec: Box) rpc_cors: None, rpc_methods: Default::default(), rpc_max_payload: None, + ws_max_out_buffer_capacity: None, prometheus_config: None, telemetry_endpoints: None, default_heap_pages: None, @@ -108,7 +109,6 @@ pub fn default_config(tokio_handle: Handle, mut chain_spec: Box) base_path: Some(base_path), wasm_runtime_overrides: None, informant_output_format, - disable_log_reloading: false, keystore_remote: None, keep_blocks: KeepBlocks::All, state_pruning: Default::default(), diff --git a/utils/build-script-utils/Cargo.toml b/utils/build-script-utils/Cargo.toml index fbef70db93bfd..786e6f9002914 100644 --- a/utils/build-script-utils/Cargo.toml +++ b/utils/build-script-utils/Cargo.toml @@ -4,7 +4,7 @@ version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Crate with utility functions for `build.rs` scripts." readme = "README.md" diff --git a/utils/fork-tree/Cargo.toml b/utils/fork-tree/Cargo.toml index 11c269bc3cba8..81fa1747a84d7 100644 --- a/utils/fork-tree/Cargo.toml +++ b/utils/fork-tree/Cargo.toml @@ -4,7 +4,7 @@ version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Utility library for managing tree-like ordered data with logic for pruning the tree while finalizing nodes." 
documentation = "https://docs.rs/fork-tree" diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 93616b590f61e..ccca30849f919 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "CLI for benchmarking FRAME" readme = "README.md" diff --git a/utils/frame/frame-utilities-cli/Cargo.toml b/utils/frame/frame-utilities-cli/Cargo.toml index 1b6597fc9f2fc..fcdbb215f91b2 100644 --- a/utils/frame/frame-utilities-cli/Cargo.toml +++ b/utils/frame/frame-utilities-cli/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "cli interface for FRAME" documentation = "https://docs.rs/substrate-frame-cli" diff --git a/utils/frame/frame-utilities-cli/src/pallet_id.rs b/utils/frame/frame-utilities-cli/src/pallet_id.rs index 2caac7db588a9..d173f52b39cd8 100644 --- a/utils/frame/frame-utilities-cli/src/pallet_id.rs +++ b/utils/frame/frame-utilities-cli/src/pallet_id.rs @@ -22,7 +22,7 @@ use sc_cli::{ utils::print_from_uri, with_crypto_scheme, CryptoSchemeFlag, Error, KeystoreParams, OutputTypeFlag, }; -use sp_core::crypto::{Ss58AddressFormat, Ss58Codec}; +use sp_core::crypto::{unwrap_or_default_ss58_version, Ss58AddressFormat, Ss58Codec}; use sp_runtime::traits::AccountIdConversion; use std::convert::{TryFrom, TryInto}; use structopt::StructOpt; @@ -78,7 +78,7 @@ impl PalletIdCmd { with_crypto_scheme!( self.crypto_scheme.scheme, print_from_uri( - &account_id.to_ss58check_with_version(self.network.clone().unwrap_or_default()), + 
&account_id.to_ss58check_with_version(unwrap_or_default_ss58_version(self.network)), password, self.network, self.output_scheme.output_type.clone() diff --git a/utils/frame/generate-bags/Cargo.toml b/utils/frame/generate-bags/Cargo.toml index 384307fbec9e5..1bb53207f7d4c 100644 --- a/utils/frame/generate-bags/Cargo.toml +++ b/utils/frame/generate-bags/Cargo.toml @@ -4,7 +4,7 @@ version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Bag threshold generation script for pallet-bag-list" readme = "README.md" diff --git a/utils/frame/generate-bags/node-runtime/Cargo.toml b/utils/frame/generate-bags/node-runtime/Cargo.toml index 7fcd981a6bbd6..68d3cad16de47 100644 --- a/utils/frame/generate-bags/node-runtime/Cargo.toml +++ b/utils/frame/generate-bags/node-runtime/Cargo.toml @@ -4,7 +4,7 @@ version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Bag threshold generation script for pallet-bag-list and node-runtime." 
readme = "README.md" diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml index ce774679f94c2..2b35402f8f63f 100644 --- a/utils/frame/remote-externalities/Cargo.toml +++ b/utils/frame/remote-externalities/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "An externalities provided environemnt that can load itself from remote nodes or cache files" readme = "README.md" @@ -13,10 +13,10 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -jsonrpsee-ws-client = { version = "0.3.0", default-features = false, features = [ +jsonrpsee-ws-client = { version = "0.3.1", default-features = false, features = [ "tokio1", ]} -jsonrpsee-proc-macros = "0.3.0" +jsonrpsee-proc-macros = "0.3.1" env_logger = "0.9" log = "0.4.11" diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs index 2052780286c66..733ec7c3200ad 100644 --- a/utils/frame/remote-externalities/src/lib.rs +++ b/utils/frame/remote-externalities/src/lib.rs @@ -160,13 +160,16 @@ impl Default for SnapshotConfig { /// Builder for remote-externalities. pub struct Builder { - /// Custom key-pairs to be injected into the externalities. - inject: Vec, + /// Custom key-pairs to be injected into the externalities. The *hashed* keys and values must + /// be given. + hashed_key_values: Vec, /// Storage entry key prefixes to be injected into the externalities. The *hashed* prefix must /// be given. hashed_prefixes: Vec>, /// Storage entry keys to be injected into the externalities. The *hashed* key must be given. hashed_keys: Vec>, + /// The keys that will be excluded from the final externality. The *hashed* key must be given. + hashed_blacklist: Vec>, /// connectivity mode, online or offline. 
mode: Mode, } @@ -176,10 +179,11 @@ pub struct Builder { impl Default for Builder { fn default() -> Self { Self { - inject: Default::default(), mode: Default::default(), + hashed_key_values: Default::default(), hashed_prefixes: Default::default(), hashed_keys: Default::default(), + hashed_blacklist: Default::default(), } } } @@ -435,12 +439,26 @@ impl Builder { }, }; - debug!( - target: LOG_TARGET, - "extending externalities with {} manually injected key-values", - self.inject.len() - ); - base_kv.extend(self.inject.clone()); + // inject manual key values. + if !self.hashed_key_values.is_empty() { + debug!( + target: LOG_TARGET, + "extending externalities with {} manually injected key-values", + self.hashed_key_values.len() + ); + base_kv.extend(self.hashed_key_values.clone()); + } + + // exclude manual key values. + if !self.hashed_blacklist.is_empty() { + debug!( + target: LOG_TARGET, + "excluding externalities from {} keys", + self.hashed_blacklist.len() + ); + base_kv.retain(|(k, _)| !self.hashed_blacklist.contains(&k.0)) + } + Ok(base_kv) } } @@ -453,13 +471,12 @@ impl Builder { } /// Inject a manual list of key and values to the storage. - pub fn inject_key_value(mut self, injections: &[KeyPair]) -> Self { + pub fn inject_hashed_key_value(mut self, injections: &[KeyPair]) -> Self { for i in injections { - self.inject.push(i.clone()); + self.hashed_key_values.push(i.clone()); } self } - /// Inject a hashed prefix. This is treated as-is, and should be pre-hashed. /// /// This should be used to inject a "PREFIX", like a storage (double) map. @@ -476,6 +493,13 @@ impl Builder { self } + /// Blacklist this hashed key from the final externalities. This is treated as-is, and should be + /// pre-hashed. + pub fn blacklist_hashed_key(mut self, hashed: &[u8]) -> Self { + self.hashed_blacklist.push(hashed.to_vec()); + self + } + /// Configure a state snapshot to be used. 
pub fn mode(mut self, mode: Mode) -> Self { self.mode = mode; @@ -541,12 +565,44 @@ mod tests { .expect("Can't read state snapshot file") .execute_with(|| {}); } + + #[tokio::test] + async fn can_exclude_from_cache() { + init_logger(); + + // get the first key from the cache file. + let some_key = Builder::::new() + .mode(Mode::Offline(OfflineConfig { + state_snapshot: SnapshotConfig::new("test_data/proxy_test"), + })) + .build() + .await + .expect("Can't read state snapshot file") + .execute_with(|| { + let key = + sp_io::storage::next_key(&[]).expect("some key must exist in the snapshot"); + assert!(sp_io::storage::get(&key).is_some()); + key + }); + + Builder::::new() + .mode(Mode::Offline(OfflineConfig { + state_snapshot: SnapshotConfig::new("test_data/proxy_test"), + })) + .blacklist_hashed_key(&some_key) + .build() + .await + .expect("Can't read state snapshot file") + .execute_with(|| assert!(sp_io::storage::get(&some_key).is_none())); + } } #[cfg(all(test, feature = "remote-test"))] mod remote_tests { use super::test_prelude::*; + const REMOTE_INACCESSIBLE: &'static str = "Can't reach the remote node. Is it running?"; + #[tokio::test] async fn can_build_one_pallet() { init_logger(); @@ -557,7 +613,7 @@ mod remote_tests { })) .build() .await - .expect("Can't reach the remote node. Is it running?") + .expect(REMOTE_INACCESSIBLE) .execute_with(|| {}); } @@ -575,7 +631,7 @@ mod remote_tests { })) .build() .await - .expect("Can't reach the remote node. Is it running?") + .expect(REMOTE_INACCESSIBLE) .execute_with(|| {}); } @@ -599,7 +655,7 @@ mod remote_tests { })) .build() .await - .expect("Can't reach the remote node. Is it running?") + .expect(REMOTE_INACCESSIBLE) .execute_with(|| { // Gav's polkadot account. 99% this will be in the council. let gav_polkadot = @@ -625,7 +681,7 @@ mod remote_tests { })) .build() .await - .expect("Can't reach the remote node. 
Is it running?") + .expect(REMOTE_INACCESSIBLE) .execute_with(|| {}); let to_delete = std::fs::read_dir(SnapshotConfig::default().path) @@ -648,7 +704,7 @@ mod remote_tests { Builder::::new() .build() .await - .expect("Can't reach the remote node. Is it running?") + .expect(REMOTE_INACCESSIBLE) .execute_with(|| {}); } } diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index a94f18d0e8925..2010d1e02f73f 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -7,7 +7,7 @@ authors = [ ] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Substrate RPC for FRAME's support" diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index e9ae506ef6b06..85868836f0456 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -4,7 +4,7 @@ version = "4.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "FRAME's system exposed over Substrate RPC" readme = "README.md" diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml index 11b899db4ca47..e922af971044c 100644 --- a/utils/frame/try-runtime/cli/Cargo.toml +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -4,7 +4,7 @@ version = "0.10.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" description = "Cli command runtime testing and dry-running" readme = "README.md" @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -parity-scale-codec = { version = "2.0.0" } +parity-scale-codec 
= { version = "2.3.1" } serde = "1.0.126" structopt = "0.3.8" @@ -31,6 +31,6 @@ sp-externalities = { version = "0.10.0-dev", path = "../../../../primitives/exte sp-version = { version = "4.0.0-dev", path = "../../../../primitives/version" } remote-externalities = { version = "0.10.0-dev", path = "../../remote-externalities" } -jsonrpsee-ws-client = { version = "0.3.0", default-features = false, features = [ +jsonrpsee-ws-client = { version = "0.3.1", default-features = false, features = [ "tokio1", ]} diff --git a/utils/frame/try-runtime/cli/src/commands/execute_block.rs b/utils/frame/try-runtime/cli/src/commands/execute_block.rs index 19422db90119f..216c63d00525d 100644 --- a/utils/frame/try-runtime/cli/src/commands/execute_block.rs +++ b/utils/frame/try-runtime/cli/src/commands/execute_block.rs @@ -143,7 +143,7 @@ where let builder = if command.overwrite_wasm_code { let (code_key, code) = extract_code(&config.chain_spec)?; - builder.inject_key_value(&[(code_key, code)]) + builder.inject_hashed_key_value(&[(code_key, code)]) } else { builder.inject_hashed_key(well_known_keys::CODE) }; diff --git a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs index 0526f5d327fb2..9125db13c78f9 100644 --- a/utils/frame/try-runtime/cli/src/commands/follow_chain.rs +++ b/utils/frame/try-runtime/cli/src/commands/follow_chain.rs @@ -112,8 +112,10 @@ where ..Default::default() })); - let new_ext = - builder.inject_key_value(&[(code_key.clone(), code.clone())]).build().await?; + let new_ext = builder + .inject_hashed_key_value(&[(code_key.clone(), code.clone())]) + .build() + .await?; log::info!( target: LOG_TARGET, "initialized state externalities at {:?}, storage root {:?}", diff --git a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs index 6f37e4b3849fa..22120ef4b5fe4 100644 --- a/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs +++ 
b/utils/frame/try-runtime/cli/src/commands/offchain_worker.rs @@ -132,7 +132,7 @@ where let builder = if command.overwrite_wasm_code { let (code_key, code) = extract_code(&config.chain_spec)?; - builder.inject_key_value(&[(code_key, code)]) + builder.inject_hashed_key_value(&[(code_key, code)]) } else { builder.inject_hashed_key(well_known_keys::CODE) }; diff --git a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs index 86f5548b8aafa..8de3cb3a32005 100644 --- a/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs +++ b/utils/frame/try-runtime/cli/src/commands/on_runtime_upgrade.rs @@ -54,7 +54,7 @@ where let ext = { let builder = command.state.builder::()?; let (code_key, code) = extract_code(&config.chain_spec)?; - builder.inject_key_value(&[(code_key, code)]).build().await? + builder.inject_hashed_key_value(&[(code_key, code)]).build().await? }; if let Some(uri) = command.state.live_uri() { diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs index e7407f2f408fa..28e51b38f2ace 100644 --- a/utils/frame/try-runtime/cli/src/lib.rs +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -29,9 +29,9 @@ //! //! Some resources about the above: //! -//! 1. https://substrate.dev/docs/en/knowledgebase/integrate/try-runtime -//! 2. https://www.crowdcast.io/e/substrate-seminar/41 -//! 3. https://substrate.dev/docs/en/knowledgebase/advanced/executor +//! 1. +//! 2. +//! 3. //! //! --- //! @@ -117,6 +117,44 @@ //! //! Note that *none* of the try-runtime operations need unsafe RPCs. //! +//! ## Migration Best Practices +//! +//! One of the main use-cases of try-runtime is using it for testing storage migrations. The +//! following points makes sure you can *effectively* test your migrations with try-runtime. +//! +//! #### Adding pre/post hooks +//! +//! One of the gems that come only in the `try-runtime` feature flag is the `pre_upgrade` and +//! 
`post_upgrade` hooks for [`OnRuntimeUpgrade`]. This trait is implemented either inside the +//! pallet, or manually in a runtime, to define a migration. In both cases, these functions can be +//! added, given the right flag: +//! +//! ```ignore +//! #[cfg(feature = "try-runtime")] +//! fn pre_upgrade() -> Result<(), &'static str> {} +//! +//! #[cfg(feature = "try-runtime")] +//! fn post_upgrade() -> Result<(), &'static str> {} +//! ``` +//! +//! (The pallet macro syntax will support this simply as a part of `#[pallet::hooks]`). +//! +//! These hooks allow you to execute some code, only within the `on-runtime-upgrade` command, before +//! and after the migration. If any data needs to be temporarily stored between the pre/post +//! migration hooks, [`OnRuntimeUpgradeHelpersExt`] can help with that. +//! +//! #### Logging +//! +//! It is super helpful to make sure your migration code uses logging (always with a `runtime` log +//! target prefix, e.g. `runtime::balance`) and state exactly at which stage it is, and what it is +//! doing. +//! +//! #### Guarding migrations +//! +//! Always make sure that any migration code is guarded either by [`StorageVersion`], or by some +//! custom storage item, so that it is NEVER executed twice, even if the code lives in two +//! consecutive runtimes. +//! //! ## Examples //! //!
Run the migrations of the local runtime on the state of polkadot, from the polkadot repo where diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml index 4a6cec2cac774..4d218e233bcbc 100644 --- a/utils/prometheus/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -5,7 +5,7 @@ version = "0.9.0" license = "Apache-2.0" authors = ["Parity Technologies "] edition = "2018" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" repository = "https://github.com/paritytech/substrate/" readme = "README.md" @@ -14,10 +14,10 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.8" -prometheus = { version = "0.11.0", default-features = false } +prometheus = { version = "0.12.0", default-features = false } futures-util = { version = "0.3.17", default-features = false, features = ["io"] } derive_more = "0.99" -async-std = { version = "1.6.5", features = ["unstable"] } +async-std = { version = "1.10.0", features = ["unstable"] } tokio = "1.10" hyper = { version = "0.14.11", default-features = false, features = ["http1", "server", "tcp"] } diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index 721f332e130f0..92b1af753ef60 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" readme = "README.md" repository = "https://github.com/paritytech/substrate/" license = "Apache-2.0" -homepage = "https://substrate.dev" +homepage = "https://substrate.io" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/utils/wasm-builder/src/prerequisites.rs b/utils/wasm-builder/src/prerequisites.rs index 0dad8b781ae5a..c45f7933a1de3 100644 --- a/utils/wasm-builder/src/prerequisites.rs +++ b/utils/wasm-builder/src/prerequisites.rs @@ -138,6 +138,13 @@ fn check_wasm_toolchain_installed( build_cmd.env_remove("CARGO_TARGET_DIR"); run_cmd.env_remove("CARGO_TARGET_DIR"); + // Make sure the host's flags aren't used here, e.g. 
if an alternative linker is specified + // in the RUSTFLAGS then the check we do here will break unless we clear these. + build_cmd.env_remove("CARGO_ENCODED_RUSTFLAGS"); + run_cmd.env_remove("CARGO_ENCODED_RUSTFLAGS"); + build_cmd.env_remove("RUSTFLAGS"); + run_cmd.env_remove("RUSTFLAGS"); + build_cmd.output().map_err(|_| err_msg.clone()).and_then(|s| { if s.status.success() { let version = run_cmd.output().ok().and_then(|o| String::from_utf8(o.stdout).ok()); diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index e1d537acbc182..e6bf4a1efd966 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -435,6 +435,10 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman // exclusive). The runner project is created in `CARGO_TARGET_DIR` and executing it will // create a sub target directory inside of `CARGO_TARGET_DIR`. .env_remove("CARGO_TARGET_DIR") + // As we are being called inside a build-script, this env variable is set. However, we set + // our own `RUSTFLAGS` and thus, we need to remove this. Otherwise cargo favors this + // env variable. + .env_remove("CARGO_ENCODED_RUSTFLAGS") // We don't want to call ourselves recursively .env(crate::SKIP_BUILD_ENV, "");