diff --git a/.cirrus.yml b/.cirrus.yml index b3ac31881e..9be0694369 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -140,7 +140,7 @@ task: FILE_ENV: "./ci/test/00_setup_env_win64.sh" task: - name: 'CentOS, dash, gui' + name: 'CentOS, depends, gui' << : *GLOBAL_TASK_TEMPLATE persistent_worker: labels: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3e9f885d13..0b4b8bae1e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,4 @@ -# Copyright (c) 2023 The Bitcoin Core developers +# Copyright (c) 2023-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -72,7 +72,7 @@ jobs: run: | # Run tests on commits after the last merge commit and before the PR head commit # Use clang++, because it is a bit faster and uses less memory than g++ - git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && CC=clang CXX=clang++ cmake -B build -DWERROR=ON -DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON -DWITH_BDB=ON -DWITH_USDT=ON -DCMAKE_CXX_FLAGS='-Wno-error=unused-member-function' && cmake --build build -j $(nproc) && ctest --output-on-failure --stop-on-failure --test-dir build -j $(nproc) && ./build/test/functional/test_runner.py -j $(( $(nproc) * 2 ))" ${{ env.TEST_BASE }} + git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && CC=clang CXX=clang++ cmake -B build -DWERROR=ON -DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON -DWITH_BDB=ON -DWITH_USDT=ON -DCMAKE_CXX_FLAGS='-Wno-error=unused-member-function' && cmake --build build -j $(nproc) && ctest --output-on-failure --stop-on-failure --test-dir build -j $(nproc) && ./build/test/functional/test_runner.py -j $(( $(nproc) * 2 )) --combinedlogslen=99999999" ${{ env.TEST_BASE }} macos-native-arm64: name: ${{ matrix.job-name }} diff --git a/CMakeLists.txt b/CMakeLists.txt index e7fa35b9ec..e542e217c5 
100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -143,8 +143,8 @@ cmake_dependent_option(WITH_DBUS "Enable DBus support." ON "CMAKE_SYSTEM_NAME ST option(WITH_MULTIPROCESS "Build multiprocess bitcoin-node and bitcoin-gui executables in addition to monolithic bitcoind and bitcoin-qt executables. Requires libmultiprocess library. Experimental." OFF) if(WITH_MULTIPROCESS) - find_package(Libmultiprocess COMPONENTS Lib) - find_package(LibmultiprocessNative COMPONENTS Bin + find_package(Libmultiprocess REQUIRED COMPONENTS Lib) + find_package(LibmultiprocessNative REQUIRED COMPONENTS Bin NAMES Libmultiprocess ) endif() diff --git a/ci/README.md b/ci/README.md index b4edd4b191..377aae7fa0 100644 --- a/ci/README.md +++ b/ci/README.md @@ -9,7 +9,7 @@ If the repository is not a fresh git clone, you might have to clean files from p The ci needs to perform various sysadmin tasks such as installing packages or writing to the user's home directory. While it should be fine to run -the ci system locally on you development box, the ci scripts can generally be assumed to have received less review and +the ci system locally on your development box, the ci scripts can generally be assumed to have received less review and testing compared to other parts of the codebase. If you want to keep the work tree clean, you might want to run the ci system in a virtual machine with a Linux operating system of your choice. diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh index d7d527c7f6..8163680ca9 100755 --- a/ci/test/00_setup_env.sh +++ b/ci/test/00_setup_env.sh @@ -68,3 +68,6 @@ export CI_BASE_PACKAGES=${CI_BASE_PACKAGES:-build-essential pkgconf curl ca-cert export GOAL=${GOAL:-install} export DIR_QA_ASSETS=${DIR_QA_ASSETS:-${BASE_SCRATCH_DIR}/qa-assets} export CI_RETRY_EXE=${CI_RETRY_EXE:-"retry --"} + +# The --platform argument used with `docker build` and `docker run`. 
+export CI_IMAGE_PLATFORM=${CI_IMAGE_PLATFORM:-"linux"} # Force linux, but use native arch by default diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh index 749ae86cb2..67e43aca0d 100755 --- a/ci/test/00_setup_env_arm.sh +++ b/ci/test/00_setup_env_arm.sh @@ -10,7 +10,8 @@ export HOST=arm-linux-gnueabihf export DPKG_ADD_ARCH="armhf" export PACKAGES="python3-zmq g++-arm-linux-gnueabihf busybox libc6:armhf libstdc++6:armhf libfontconfig1:armhf libxcb1:armhf" export CONTAINER_NAME=ci_arm_linux -export CI_IMAGE_NAME_TAG="docker.io/arm64v8/debian:bookworm" # Check that https://packages.debian.org/bookworm/g++-arm-linux-gnueabihf (version 12.2, similar to guix) can cross-compile +export CI_IMAGE_NAME_TAG="docker.io/debian:bookworm" # Check that https://packages.debian.org/bookworm/g++-arm-linux-gnueabihf (version 12.2, similar to guix) can cross-compile +export CI_IMAGE_PLATFORM="linux/arm64" export USE_BUSY_BOX=true export RUN_UNIT_TESTS=true export RUN_FUNCTIONAL_TESTS=false diff --git a/ci/test/00_setup_env_i686_multiprocess.sh b/ci/test/00_setup_env_i686_multiprocess.sh index 5810ae8639..e659486407 100755 --- a/ci/test/00_setup_env_i686_multiprocess.sh +++ b/ci/test/00_setup_env_i686_multiprocess.sh @@ -8,7 +8,8 @@ export LC_ALL=C.UTF-8 export HOST=i686-pc-linux-gnu export CONTAINER_NAME=ci_i686_multiprocess -export CI_IMAGE_NAME_TAG="docker.io/amd64/ubuntu:24.04" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" +export CI_IMAGE_PLATFORM="linux/amd64" export PACKAGES="llvm clang g++-multilib" export DEP_OPTS="DEBUG=1 MULTIPROCESS=1" export GOAL="install" diff --git a/ci/test/00_setup_env_native_centos.sh b/ci/test/00_setup_env_native_centos.sh index 878778506f..ff4b9bc570 100755 --- a/ci/test/00_setup_env_native_centos.sh +++ b/ci/test/00_setup_env_native_centos.sh @@ -7,9 +7,8 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_centos -export CI_IMAGE_NAME_TAG="quay.io/centos/centos:stream9" -export STREAM_GCC_V="12" -export 
CI_BASE_PACKAGES="gcc-toolset-${STREAM_GCC_V}-gcc-c++ glibc-devel gcc-toolset-${STREAM_GCC_V}-libstdc++-devel ccache make git python3 python3-pip which patch xz procps-ng dash rsync coreutils bison e2fsprogs cmake" +export CI_IMAGE_NAME_TAG="quay.io/centos/centos:stream10" +export CI_BASE_PACKAGES="gcc-c++ glibc-devel libstdc++-devel ccache make git python3 python3-pip which patch xz procps-ng ksh rsync coreutils bison e2fsprogs cmake" export PIP_PACKAGES="pyzmq" export GOAL="install" export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DBUILD_GUI=ON -DREDUCE_EXPORTS=ON" diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh index eb12bc6fd1..0e0ddf3382 100755 --- a/ci/test/00_setup_env_s390x.sh +++ b/ci/test/00_setup_env_s390x.sh @@ -9,7 +9,8 @@ export LC_ALL=C.UTF-8 export HOST=s390x-linux-gnu export PACKAGES="python3-zmq" export CONTAINER_NAME=ci_s390x -export CI_IMAGE_NAME_TAG="docker.io/s390x/ubuntu:24.04" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" +export CI_IMAGE_PLATFORM="linux/s390x" export TEST_RUNNER_EXTRA="--exclude rpc_bind,feature_bind_extra" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 export RUN_FUNCTIONAL_TESTS=true export GOAL="install" diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh index 25da64c524..985b2941bf 100755 --- a/ci/test/00_setup_env_win64.sh +++ b/ci/test/00_setup_env_win64.sh @@ -7,7 +7,8 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_win64 -export CI_IMAGE_NAME_TAG="docker.io/amd64/debian:bookworm" # Check that https://packages.debian.org/bookworm/g++-mingw-w64-x86-64-posix (version 12.2, similar to guix) can cross-compile +export CI_IMAGE_NAME_TAG="docker.io/debian:bookworm" # Check that https://packages.debian.org/bookworm/g++-mingw-w64-x86-64-posix (version 12.2, similar to guix) can cross-compile +export CI_IMAGE_PLATFORM="linux/amd64" export HOST=x86_64-w64-mingw32 export DPKG_ADD_ARCH="i386" export PACKAGES="nsis 
g++-mingw-w64-x86-64-posix wine-binfmt wine64 wine32 file" diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh index ce01db325c..28d0898a4a 100755 --- a/ci/test/02_run_container.sh +++ b/ci/test/02_run_container.sh @@ -14,7 +14,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then # Though, exclude those with newlines to avoid parsing problems. python3 -c 'import os; [print(f"{key}={value}") for key, value in os.environ.items() if "\n" not in value and "HOME" != key and "PATH" != key and "USER" != key]' | tee "/tmp/env-$USER-$CONTAINER_NAME" # System-dependent env vars must be kept as is. So read them from the container. - docker run --rm "${CI_IMAGE_NAME_TAG}" bash -c "env | grep --extended-regexp '^(HOME|PATH|USER)='" | tee --append "/tmp/env-$USER-$CONTAINER_NAME" + docker run --platform="${CI_IMAGE_PLATFORM}" --rm "${CI_IMAGE_NAME_TAG}" bash -c "env | grep --extended-regexp '^(HOME|PATH|USER)='" | tee --append "/tmp/env-$USER-$CONTAINER_NAME" # Env vars during the build can not be changed. For example, a modified # $MAKEJOBS is ignored in the build process. 
Use --cpuset-cpus as an @@ -31,6 +31,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \ --build-arg "FILE_ENV=${FILE_ENV}" \ $MAYBE_CPUSET \ + --platform="${CI_IMAGE_PLATFORM}" \ --label="${CI_IMAGE_LABEL}" \ --tag="${CONTAINER_NAME}" \ "${BASE_READ_ONLY_DIR}" @@ -100,6 +101,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then --env-file /tmp/env-$USER-$CONTAINER_NAME \ --name "$CONTAINER_NAME" \ --network ci-ip6net \ + --platform="${CI_IMAGE_PLATFORM}" \ "$CONTAINER_NAME") export CI_CONTAINER_ID export CI_EXEC_CMD_PREFIX="docker exec ${CI_CONTAINER_ID}" diff --git a/ci/test/03_test_script.sh b/ci/test/03_test_script.sh index 6e77a8927c..bd5c86bfbe 100755 --- a/ci/test/03_test_script.sh +++ b/ci/test/03_test_script.sh @@ -92,9 +92,7 @@ fi if [ -z "$NO_DEPENDS" ]; then if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then - SHELL_OPTS="CONFIG_SHELL=/bin/dash" - # shellcheck disable=SC1090 - source "/opt/rh/gcc-toolset-${STREAM_GCC_V}/enable" + SHELL_OPTS="CONFIG_SHELL=/bin/ksh" # Temporarily use ksh instead of dash, until https://bugzilla.redhat.com/show_bug.cgi?id=2335416 is fixed. else SHELL_OPTS="CONFIG_SHELL=" fi diff --git a/cmake/leveldb.cmake b/cmake/leveldb.cmake index 823a5d8e3d..2eae3a1f0a 100644 --- a/cmake/leveldb.cmake +++ b/cmake/leveldb.cmake @@ -60,7 +60,6 @@ target_compile_definitions(leveldb HAVE_FULLFSYNC=$ HAVE_O_CLOEXEC=$ FALLTHROUGH_INTENDED=[[fallthrough]] - LEVELDB_IS_BIG_ENDIAN=$ $<$>:LEVELDB_PLATFORM_POSIX> $<$:LEVELDB_PLATFORM_WINDOWS> $<$:_UNICODE;UNICODE> diff --git a/contrib/guix/README.md b/contrib/guix/README.md index 2c9056ce9c..f0ba490366 100644 --- a/contrib/guix/README.md +++ b/contrib/guix/README.md @@ -247,7 +247,7 @@ details. * _**SDK_PATH**_ Set the path where _extracted_ SDKs can be found. This is passed through to - the depends tree. Note that this is should be set to the _parent_ directory of + the depends tree. Note that this should be set to the _parent_ directory of the actual SDK (e.g. 
`SDK_PATH=$HOME/Downloads/macOS-SDKs` instead of `$HOME/Downloads/macOS-SDKs/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers`). diff --git a/contrib/tracing/README.md b/contrib/tracing/README.md index cf59d7e2bb..aa48b3e3f3 100644 --- a/contrib/tracing/README.md +++ b/contrib/tracing/README.md @@ -155,7 +155,7 @@ $ python3 contrib/tracing/log_raw_p2p_msgs.py $(pidof bitcoind) ``` Logging raw P2P messages. -Messages larger that about 32kb will be cut off! +Messages larger than about 32kb will be cut off! Some messages might be lost! outbound msg 'inv' from peer 4 (outbound-full-relay, XX.XXX.XX.4:8333) with 253 bytes: 0705000000be2245c8f844c9f763748e1a7… … diff --git a/contrib/tracing/log_raw_p2p_msgs.py b/contrib/tracing/log_raw_p2p_msgs.py index 094888c13e..69f457acb1 100755 --- a/contrib/tracing/log_raw_p2p_msgs.py +++ b/contrib/tracing/log_raw_p2p_msgs.py @@ -168,7 +168,7 @@ def handle_outbound(_, data, size): bpf["outbound_messages"].open_perf_buffer(handle_outbound) print("Logging raw P2P messages.") - print("Messages larger that about 32kb will be cut off!") + print("Messages larger than about 32kb will be cut off!") print("Some messages might be lost!") while True: try: diff --git a/depends/Makefile b/depends/Makefile index 9ba8213a92..1053c8a249 100644 --- a/depends/Makefile +++ b/depends/Makefile @@ -141,8 +141,16 @@ include packages/packages.mk # 2. 
Before including packages/*.mk (excluding packages/packages.mk), since # they rely on the build_id variables # -build_id:=$(shell env CC='$(build_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(build_CXX)' CXX_STANDARD='$(CXX_STANDARD)' AR='$(build_AR)' NM='$(build_NM)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') -$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(host_CXX)' CXX_STANDARD='$(CXX_STANDARD)' AR='$(host_AR)' NM='$(host_NM)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') +build_id:=$(shell env CC='$(build_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(build_CXX)' CXX_STANDARD='$(CXX_STANDARD)' \ + AR='$(build_AR)' NM='$(build_NM)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' \ + DEBUG='$(DEBUG)' NO_HARDEN='$(NO_HARDEN)' \ + ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') + +$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(host_CXX)' CXX_STANDARD='$(CXX_STANDARD)' \ + CPPFLAGS='$(CPPFLAGS)' CFLAGS='$(CFLAGS)' CXXFLAGS='$(CXXFLAGS)' LDFLAGS='$(LDFLAGS)' \ + AR='$(host_AR)' NM='$(host_NM)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' \ + DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' \ + ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') boost_packages_$(NO_BOOST) = $(boost_packages) diff --git a/depends/gen_id b/depends/gen_id index e2e2273b2d..fe6d163547 100755 --- a/depends/gen_id +++ b/depends/gen_id @@ -1,8 +1,9 @@ #!/usr/bin/env bash # Usage: env [ CC=... ] [ C_STANDARD=...] [ CXX=... ] [CXX_STANDARD=...] \ +# [ CPPFLAGS=... 
] [CFLAGS=...] [CXXFLAGS=...] [LDFLAGS=...] \ # [ AR=... ] [ NM=... ] [ RANLIB=... ] [ STRIP=... ] [ DEBUG=... ] \ -# [ LTO=... ] [ NO_HARDEN=... ] ./build-id [ID_SALT]... +# [ LTO=... ] [ NO_HARDEN=... ] ./gen_id [ID_SALT]... # # Prints to stdout a SHA256 hash representing the current toolset, used by # depends/Makefile as a build id for caching purposes (detecting when the @@ -34,6 +35,13 @@ echo "$@" echo "END ID SALT" + echo "BEGIN FLAGS" + echo "CPPFLAGS=${CPPFLAGS}" + echo "CFLAGS=${CFLAGS}" + echo "CXXFLAGS=${CXXFLAGS}" + echo "LDFLAGS=${LDFLAGS}" + echo "END FLAGS" + # GCC only prints COLLECT_LTO_WRAPPER when invoked with just "-v", but we want # the information from "-v -E -" as well, so just include both. echo "BEGIN CC" @@ -50,6 +58,17 @@ echo "CXX_STANDARD=${CXX_STANDARD}" echo "END CXX" + # We use lld when cross-compiling for macOS, and it's version should + # be tied to LLVM. However someone compiling with GCC and -fuse-ld=lld + # would not see a cache bust if the LLVM toolchain was updated. + echo "BEGIN lld" + bash -c "ld.lld --version" + echo "END lld" + + echo "BEGIN mold" + bash -c "mold --version" + echo "END mold" + echo "BEGIN AR" bash -c "${AR} --version" env | grep '^AR_' diff --git a/depends/packages/libevent.mk b/depends/packages/libevent.mk index 9507fe9ddf..14ad24a1de 100644 --- a/depends/packages/libevent.mk +++ b/depends/packages/libevent.mk @@ -10,9 +10,10 @@ $(package)_build_subdir=build # version as we do in releases. Due to quirks in libevents build system, this # is also required to enable support for ipv6. See #19375. 
define $(package)_set_vars - $(package)_config_opts=-DEVENT__DISABLE_BENCHMARK=ON -DEVENT__DISABLE_OPENSSL=ON + $(package)_config_opts=-DCMAKE_BUILD_TYPE=None -DEVENT__DISABLE_BENCHMARK=ON -DEVENT__DISABLE_OPENSSL=ON $(package)_config_opts+=-DEVENT__DISABLE_SAMPLES=ON -DEVENT__DISABLE_REGRESS=ON $(package)_config_opts+=-DEVENT__DISABLE_TESTS=ON -DEVENT__LIBRARY_TYPE=STATIC + $(package)_cflags += -ffile-prefix-map=$($(package)_extract_dir)=/usr $(package)_cppflags += -D_GNU_SOURCE $(package)_cppflags_mingw32=-D_WIN32_WINNT=0x0A00 diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 727dcfc9ef..d41ac4e784 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -1,9 +1,9 @@ package=qt -$(package)_version=5.15.14 +$(package)_version=5.15.16 $(package)_download_path=https://download.qt.io/official_releases/qt/5.15/$($(package)_version)/submodules $(package)_suffix=everywhere-opensource-src-$($(package)_version).tar.xz $(package)_file_name=qtbase-$($(package)_suffix) -$(package)_sha256_hash=500d3b390048e9538c28b5f523dfea6936f9c2e10d24ab46580ff57d430b98be +$(package)_sha256_hash=b04815058c18058b6ba837206756a2c87d1391f07a0dcb0dd314f970fd041592 $(package)_linux_dependencies=freetype fontconfig libxcb libxkbcommon libxcb_util libxcb_util_render libxcb_util_keysyms libxcb_util_image libxcb_util_wm $(package)_qt_libs=corelib network widgets gui plugins testlib $(package)_linguist_tools = lrelease lupdate lconvert @@ -17,19 +17,17 @@ $(package)_patches += no_warnings_for_symbols.patch $(package)_patches += rcc_hardcode_timestamp.patch $(package)_patches += duplicate_lcqpafonts.patch $(package)_patches += guix_cross_lib_path.patch -$(package)_patches += fix-macos-linker.patch $(package)_patches += memory_resource.patch $(package)_patches += clang_18_libpng.patch $(package)_patches += utc_from_string_no_optimize.patch $(package)_patches += windows_lto.patch $(package)_patches += darwin_no_libm.patch -$(package)_patches += zlib-timebits64.patch 
$(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) -$(package)_qttranslations_sha256_hash=5b94d1a11b566908622fcca2f8b799744d2f8a68da20be4caa5953ed63b10489 +$(package)_qttranslations_sha256_hash=415dbbb82a75dfc9a7be969e743bee54c0e6867be37bce4cf8f03da39f20112a $(package)_qttools_file_name=qttools-$($(package)_suffix) -$(package)_qttools_sha256_hash=12061a85baf5f4de8fbc795e1d3872b706f340211b9e70962caeffc6f5e89563 +$(package)_qttools_sha256_hash=1cab11887faca54af59f4995ee435c9ad98d194e9e6889c846692c8b6815fc1c $(package)_extra_sources = $($(package)_qttranslations_file_name) $(package)_extra_sources += $($(package)_qttools_file_name) @@ -223,7 +221,6 @@ endef define $(package)_preprocess_cmds cp $($(package)_patch_dir)/qt.pro qt.pro && \ cp $($(package)_patch_dir)/qttools_src.pro qttools/src/src.pro && \ - patch -p1 -i $($(package)_patch_dir)/fix-macos-linker.patch && \ patch -p1 -i $($(package)_patch_dir)/dont_hardcode_pwd.patch && \ patch -p1 -i $($(package)_patch_dir)/no-xlib.patch && \ patch -p1 -i $($(package)_patch_dir)/qtbase-moc-ignore-gcc-macro.patch && \ @@ -236,7 +233,6 @@ define $(package)_preprocess_cmds patch -p1 -i $($(package)_patch_dir)/guix_cross_lib_path.patch && \ patch -p1 -i $($(package)_patch_dir)/windows_lto.patch && \ patch -p1 -i $($(package)_patch_dir)/darwin_no_libm.patch && \ - patch -p1 -i $($(package)_patch_dir)/zlib-timebits64.patch && \ mkdir -p qtbase/mkspecs/macx-clang-linux &&\ cp -f qtbase/mkspecs/macx-clang/qplatformdefs.h qtbase/mkspecs/macx-clang-linux/ &&\ cp -f $($(package)_patch_dir)/mac-qmake.conf qtbase/mkspecs/macx-clang-linux/qmake.conf && \ diff --git a/depends/patches/qt/fix-macos-linker.patch b/depends/patches/qt/fix-macos-linker.patch deleted file mode 100644 index e439685656..0000000000 --- a/depends/patches/qt/fix-macos-linker.patch +++ /dev/null @@ -1,55 +0,0 @@ -qmake: Don't error out if QMAKE_DEFAULT_LIBDIRS is empty on macOS - -The new linker in Xcode 15 doesn't provide any default linker or 
-framework paths when requested via -v, but still seems to use the -default paths documented in the ld man page. - -We trust that linker will do the right thing, even if we don't -know of its default linker paths. - -We also need to opt out of the default fallback logic to -set the libdirs to /lib and /usr/lib. - -This may result in UnixMakefileGenerator::findLibraries finding -different libraries than expected, if additional paths are -passed with -L, which will then take precedence for qmake, -even if the linker itself will use the library from the -SDK's default paths. This should hopefully not be an issue -in practice, as we don't turn -lFoo into absolute paths in -qmake, so the only risk is that we're picking up the wrong -prl files and adding additional dependencies that the lib -in the SDK doesn't have. - -Upstream commits: - - Qt 5.15.16: Not yet publicly available. - - Qt dev: cdf64b0e47115cc473e1afd1472b4b09e130b2a5 - -For other Qt branches see -https://codereview.qt-project.org/q/I2347b26e2df0828471373b0e15b8c9089274c65d - ---- old/qtbase/mkspecs/features/toolchain.prf -+++ new/qtbase/mkspecs/features/toolchain.prf -@@ -288,9 +288,12 @@ isEmpty($${target_prefix}.INCDIRS) { - } - } - } -- isEmpty(QMAKE_DEFAULT_LIBDIRS)|isEmpty(QMAKE_DEFAULT_INCDIRS): \ -+ isEmpty(QMAKE_DEFAULT_INCDIRS): \ - !integrity: \ -- error("failed to parse default search paths from compiler output") -+ error("failed to parse default include paths from compiler output") -+ isEmpty(QMAKE_DEFAULT_LIBDIRS): \ -+ !integrity:!darwin: \ -+ error("failed to parse default library paths from compiler output") - QMAKE_DEFAULT_LIBDIRS = $$unique(QMAKE_DEFAULT_LIBDIRS) - } else: ghs { - cmd = $$QMAKE_CXX $$QMAKE_CXXFLAGS -$${LITERAL_HASH} -o /tmp/fake_output /tmp/fake_input.cpp -@@ -412,7 +415,7 @@ isEmpty($${target_prefix}.INCDIRS) { - QMAKE_DEFAULT_INCDIRS = $$split(INCLUDE, $$QMAKE_DIRLIST_SEP) - } - -- unix:if(!cross_compile|host_build) { -+ unix:!darwin:if(!cross_compile|host_build) { - 
isEmpty(QMAKE_DEFAULT_INCDIRS): QMAKE_DEFAULT_INCDIRS = /usr/include /usr/local/include - isEmpty(QMAKE_DEFAULT_LIBDIRS): QMAKE_DEFAULT_LIBDIRS = /lib /usr/lib - } diff --git a/depends/patches/qt/memory_resource.patch b/depends/patches/qt/memory_resource.patch index 312f0669f6..14e25121c0 100644 --- a/depends/patches/qt/memory_resource.patch +++ b/depends/patches/qt/memory_resource.patch @@ -14,36 +14,3 @@ and https://bugreports.qt.io/browse/QTBUG-114316 # include # include #else - ---- a/qtbase/src/corelib/global/qcompilerdetection.h -+++ b/qtbase/src/corelib/global/qcompilerdetection.h -@@ -1055,16 +1055,22 @@ - # endif // !_HAS_CONSTEXPR - # endif // !__GLIBCXX__ && !_LIBCPP_VERSION - # endif // Q_OS_QNX --# if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) && defined(__GNUC_LIBSTD__) \ -- && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402) -+# if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) -+# if defined(__GNUC_LIBSTD__) && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402) - // Apple has not updated libstdc++ since 2007, which means it does not have - // or std::move. 
Let's disable these features --# undef Q_COMPILER_INITIALIZER_LISTS --# undef Q_COMPILER_RVALUE_REFS --# undef Q_COMPILER_REF_QUALIFIERS -+# undef Q_COMPILER_INITIALIZER_LISTS -+# undef Q_COMPILER_RVALUE_REFS -+# undef Q_COMPILER_REF_QUALIFIERS - // Also disable , since it's clearly not there --# undef Q_COMPILER_ATOMICS --# endif -+# undef Q_COMPILER_ATOMICS -+# endif -+# if defined(__cpp_lib_memory_resource) \ -+ && ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < 140000) \ -+ || (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 170000)) -+# undef __cpp_lib_memory_resource // Only supported on macOS 14 and iOS 17 -+# endif -+# endif // (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) - # if defined(Q_CC_CLANG) && defined(Q_CC_INTEL) && Q_CC_INTEL >= 1500 - // ICC 15.x and 16.0 have their own implementation of std::atomic, which is activated when in Clang mode - // (probably because libc++'s on OS X failed to compile), but they're missing some diff --git a/depends/patches/qt/zlib-timebits64.patch b/depends/patches/qt/zlib-timebits64.patch deleted file mode 100644 index 139c1dfa77..0000000000 --- a/depends/patches/qt/zlib-timebits64.patch +++ /dev/null @@ -1,31 +0,0 @@ -From a566e156b3fa07b566ddbf6801b517a9dba04fa3 Mon Sep 17 00:00:00 2001 -From: Mark Adler -Date: Sat, 29 Jul 2023 22:13:09 -0700 -Subject: [PATCH] Avoid compiler complaints if _TIME_BITS defined when building - zlib. - -zlib does not use time_t, so _TIME_BITS is irrelevant. However it -may be defined anyway as part of a sledgehammer indiscriminately -applied to all builds. 
- -From https://github.com/madler/zlib/commit/a566e156b3fa07b566ddbf6801b517a9dba04fa3.patch ---- - qtbase/src/3rdparty/zlib/src/gzguts.h | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - -diff --git a/qtbase/src/3rdparty/zlib/src/gzguts.h b/qtbase/src/3rdparty/zlib/src/gzguts.h -index e23f831f5..f9375047e 100644 ---- a/qtbase/src/3rdparty/zlib/src/gzguts.h -+++ b/qtbase/src/3rdparty/zlib/src/gzguts.h -@@ -26,9 +26,8 @@ - # ifndef _LARGEFILE_SOURCE - # define _LARGEFILE_SOURCE 1 - # endif --# ifdef _FILE_OFFSET_BITS --# undef _FILE_OFFSET_BITS --# endif -+# undef _FILE_OFFSET_BITS -+# undef _TIME_BITS - #endif - - #ifdef HAVE_HIDDEN diff --git a/doc/benchmarking.md b/doc/benchmarking.md index 8f836219c9..5e3b6ddadf 100644 --- a/doc/benchmarking.md +++ b/doc/benchmarking.md @@ -47,12 +47,29 @@ or using a regex filter to only run certain benchmarks. Notes --------------------- -More benchmarks are needed for, in no particular order: -- Script Validation -- Coins database -- Memory pool -- Cuckoo Cache -- P2P throughput + +Benchmarks help with monitoring for performance regressions and can act as a +scope for future performance improvements. They should cover components that +impact performance critical functions of the system. Functions are performance +critical if their performance impacts users and the cost associated with a +degradation in performance is high. A non-exhaustive list: + +- Initial block download (Cost: slow IBD results in full node operation being + less accessible) +- Block template creation (Cost: slow block template creation may result in + lower fee revenue for miners) +- Block propagation (Cost: slow block propagation may increase the rate of + orphaned blocks and mining centralization) + +A change aiming to improve the performance may be rejected when a clear +end-to-end performance improvement cannot be demonstrated. 
The change might +also be rejected if the code bloat or review/maintenance burden is too high to +justify the improvement. + +Benchmarks are ill-suited for testing denial-of-service issues as they are +restricted to the same input set (introducing bias). [Fuzz +tests](/doc/fuzzing.md) are better suited for this purpose, as they are +specifically aimed at exploring the possible input space. Going Further -------------------- diff --git a/doc/dependencies.md b/doc/dependencies.md index ec068801a8..2163d52da3 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -29,7 +29,7 @@ You can find installation instructions in the `build-*.md` file for your platfor | [Fontconfig](../depends/packages/fontconfig.mk) | [link](https://www.freedesktop.org/wiki/Software/fontconfig/) | [2.12.6](https://github.com/bitcoin/bitcoin/pull/23495) | 2.6 | Yes | | [FreeType](../depends/packages/freetype.mk) | [link](https://freetype.org) | [2.11.0](https://github.com/bitcoin/bitcoin/commit/01544dd78ccc0b0474571da854e27adef97137fb) | 2.3.0 | Yes | | [qrencode](../depends/packages/qrencode.mk) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | | No | -| [Qt](../depends/packages/qt.mk) | [link](https://download.qt.io/official_releases/qt/) | [5.15.14](https://github.com/bitcoin/bitcoin/pull/30198) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | +| [Qt](../depends/packages/qt.mk) | [link](https://download.qt.io/official_releases/qt/) | [5.15.16](https://github.com/bitcoin/bitcoin/pull/30774) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | ### Notifications | Dependency | Releases | Version used | Minimum required | Runtime | diff --git a/doc/fuzzing.md b/doc/fuzzing.md index 365314a592..3b3c2c4c0e 100644 --- a/doc/fuzzing.md +++ b/doc/fuzzing.md @@ -101,6 +101,18 @@ INFO: seed corpus: files: 991 min: 1b max: 1858b total: 288291b rss: 150Mb … ``` +## Using the MemorySanitizer (MSan) + +MSan 
[requires](https://clang.llvm.org/docs/MemorySanitizer.html#handling-external-code) +that all linked code be instrumented. The exact steps to achieve this may vary +but involve compiling `clang` from source, using the built `clang` to compile +an instrumentalized libc++, then using it to build [Bitcoin Core dependencies +from source](../depends/README.md) and finally the Bitcoin Core fuzz binary +itself. One can use the MSan CI job as an example for how to perform these +steps. + +Valgrind is an alternative to MSan that does not require building a custom libc++. + ## Run without sanitizers for increased throughput Fuzzing on a harness compiled with `-DSANITIZERS=address,fuzzer,undefined` is diff --git a/doc/policy/packages.md b/doc/policy/packages.md index 9bd9becfdc..febdbbf13c 100644 --- a/doc/policy/packages.md +++ b/doc/policy/packages.md @@ -45,7 +45,7 @@ The following rules are enforced for all packages: - No more than MAX_REPLACEMENT_CANDIDATES transactions can be replaced, analogous to regular [replacement rule](./mempool-replacements.md) 5). - - Replacements must pay more total total fees at the incremental relay fee (analogous to + - Replacements must pay more total fees at the incremental relay fee (analogous to regular [replacement rules](./mempool-replacements.md) 3 and 4). - Parent feerate must be lower than package feerate. diff --git a/doc/release-notes-31583.md b/doc/release-notes-31583.md new file mode 100644 index 0000000000..246066d4e4 --- /dev/null +++ b/doc/release-notes-31583.md @@ -0,0 +1,9 @@ +Updated RPCs +--- +- `getmininginfo` now returns `nBits` and the current target in the `target` field. It also returns a `next` object which specifies the `height`, `nBits`, `difficulty`, and `target` for the next block. 
+- `getblock` and `getblockheader` now return the current target in the `target` field +- `getblockchaininfo` and `getchainstates` now return `nBits` and the current target in the `target` field + +REST interface +--- +- `GET /rest/block/.json` and `GET /rest/headers/.json` now return the current target in the `target` field diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 40d1ce74b9..736c14e7d3 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -111,6 +111,7 @@ add_library(bitcoin_common STATIC EXCLUDE_FROM_ALL auxpow.cpp base58.cpp bech32.cpp + chain.cpp chainparams.cpp chainparamsbase.cpp coins.cpp @@ -144,6 +145,7 @@ add_library(bitcoin_common STATIC EXCLUDE_FROM_ALL outputtype.cpp policy/feerate.cpp policy/policy.cpp + pow.cpp protocol.cpp psbt.cpp rpc/external_signer.cpp @@ -202,7 +204,6 @@ add_library(bitcoin_node STATIC EXCLUDE_FROM_ALL bip324.cpp blockencodings.cpp blockfilter.cpp - chain.cpp consensus/tx_verify.cpp dbwrapper.cpp deploymentstatus.cpp @@ -264,7 +265,6 @@ add_library(bitcoin_node STATIC EXCLUDE_FROM_ALL policy/rbf.cpp policy/settings.cpp policy/truc_policy.cpp - pow.cpp rest.cpp rpc/auxpow_miner.cpp rpc/blockchain.cpp diff --git a/src/bench/CMakeLists.txt b/src/bench/CMakeLists.txt index 4589ef177c..c55bbb1e05 100644 --- a/src/bench/CMakeLists.txt +++ b/src/bench/CMakeLists.txt @@ -44,7 +44,7 @@ add_executable(bench_bitcoin pool.cpp prevector.cpp random.cpp - readblock.cpp + readwriteblock.cpp rollingbloom.cpp rpc_blockchain.cpp rpc_mempool.cpp diff --git a/src/bench/readblock.cpp b/src/bench/readblock.cpp deleted file mode 100644 index 058d953b4e..0000000000 --- a/src/bench/readblock.cpp +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright (c) 2023 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -static FlatFilePos WriteBlockToDisk(ChainstateManager& chainman) -{ - DataStream stream{benchmark::data::block413567}; - CBlock block; - stream >> TX_WITH_WITNESS(block); - - return chainman.m_blockman.SaveBlockToDisk(block, 0); -} - -static void ReadBlockFromDiskTest(benchmark::Bench& bench) -{ - const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; - ChainstateManager& chainman{*testing_setup->m_node.chainman}; - - CBlock block; - const auto pos{WriteBlockToDisk(chainman)}; - - bench.run([&] { - const auto success{chainman.m_blockman.ReadBlockFromDisk(block, pos)}; - assert(success); - }); -} - -static void ReadRawBlockFromDiskTest(benchmark::Bench& bench) -{ - const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; - ChainstateManager& chainman{*testing_setup->m_node.chainman}; - - std::vector block_data; - const auto pos{WriteBlockToDisk(chainman)}; - - bench.run([&] { - const auto success{chainman.m_blockman.ReadRawBlockFromDisk(block_data, pos)}; - assert(success); - }); -} - -BENCHMARK(ReadBlockFromDiskTest, benchmark::PriorityLevel::HIGH); -BENCHMARK(ReadRawBlockFromDiskTest, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/readwriteblock.cpp b/src/bench/readwriteblock.cpp new file mode 100644 index 0000000000..cdf86185ae --- /dev/null +++ b/src/bench/readwriteblock.cpp @@ -0,0 +1,68 @@ +// Copyright (c) 2023 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +static CBlock CreateTestBlock() +{ + DataStream stream{benchmark::data::block413567}; + CBlock block; + stream >> TX_WITH_WITNESS(block); + return block; +} + +static void SaveBlockBench(benchmark::Bench& bench) +{ + const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; + auto& blockman{testing_setup->m_node.chainman->m_blockman}; + const CBlock block{CreateTestBlock()}; + bench.run([&] { + const auto pos{blockman.WriteBlock(block, 413'567)}; + assert(!pos.IsNull()); + }); +} + +static void ReadBlockBench(benchmark::Bench& bench) +{ + const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; + auto& blockman{testing_setup->m_node.chainman->m_blockman}; + const auto pos{blockman.WriteBlock(CreateTestBlock(), 413'567)}; + CBlock block; + bench.run([&] { + const auto success{blockman.ReadBlock(block, pos)}; + assert(success); + }); +} + +static void ReadRawBlockBench(benchmark::Bench& bench) +{ + const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; + auto& blockman{testing_setup->m_node.chainman->m_blockman}; + const auto pos{blockman.WriteBlock(CreateTestBlock(), 413'567)}; + std::vector block_data; + blockman.ReadRawBlock(block_data, pos); // warmup + bench.run([&] { + const auto success{blockman.ReadRawBlock(block_data, pos)}; + assert(success); + }); +} + +BENCHMARK(SaveBlockBench, benchmark::PriorityLevel::HIGH); +BENCHMARK(ReadBlockBench, benchmark::PriorityLevel::HIGH); +BENCHMARK(ReadRawBlockBench, benchmark::PriorityLevel::HIGH); diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp index 7e3e2d8e48..df951a14e4 100644 --- a/src/bench/rpc_blockchain.cpp +++ b/src/bench/rpc_blockchain.cpp @@ -48,8 +48,9 @@ struct TestBlockAndIndex { static void BlockToJsonVerbose(benchmark::Bench& bench) { TestBlockAndIndex data; + const uint256 
pow_limit{data.testing_setup->m_node.chainman->GetParams().GetConsensus().powLimit}; bench.run([&] { - auto univalue = blockToJSON(data.testing_setup->m_node.chainman->m_blockman, data.block, data.blockindex, data.blockindex, TxVerbosity::SHOW_DETAILS_AND_PREVOUT); + auto univalue = blockToJSON(data.testing_setup->m_node.chainman->m_blockman, data.block, data.blockindex, data.blockindex, TxVerbosity::SHOW_DETAILS_AND_PREVOUT, pow_limit); ankerl::nanobench::doNotOptimizeAway(univalue); }); } @@ -59,7 +60,8 @@ BENCHMARK(BlockToJsonVerbose, benchmark::PriorityLevel::HIGH); static void BlockToJsonVerboseWrite(benchmark::Bench& bench) { TestBlockAndIndex data; - auto univalue = blockToJSON(data.testing_setup->m_node.chainman->m_blockman, data.block, data.blockindex, data.blockindex, TxVerbosity::SHOW_DETAILS_AND_PREVOUT); + const uint256 pow_limit{data.testing_setup->m_node.chainman->GetParams().GetConsensus().powLimit}; + auto univalue = blockToJSON(data.testing_setup->m_node.chainman->m_blockman, data.block, data.blockindex, data.blockindex, TxVerbosity::SHOW_DETAILS_AND_PREVOUT, pow_limit); bench.run([&] { auto str = univalue.write(); ankerl::nanobench::doNotOptimizeAway(str); diff --git a/src/bench/wallet_migration.cpp b/src/bench/wallet_migration.cpp index eff6c6b526..524839e387 100644 --- a/src/bench/wallet_migration.cpp +++ b/src/bench/wallet_migration.cpp @@ -5,6 +5,7 @@ #include // IWYU pragma: keep #include +#include #include #include #include @@ -16,7 +17,7 @@ #include -#if defined(USE_BDB) && defined(USE_SQLITE) // only enable benchmark when bdb and sqlite are enabled +#if defined(USE_SQLITE) // only enable benchmark when sqlite is enabled namespace wallet{ @@ -32,41 +33,39 @@ static void WalletMigration(benchmark::Bench& bench) int NUM_WATCH_ONLY_ADDR = 20; // Setup legacy wallet - DatabaseOptions options; - options.use_unsafe_sync = true; - options.verify = false; - DatabaseStatus status; - bilingual_str error; - auto database = 
MakeWalletDatabase(fs::PathToString(test_setup->m_path_root / "legacy"), options, status, error); - uint64_t create_flags = 0; - auto wallet = TestLoadWallet(std::move(database), context, create_flags); + std::unique_ptr wallet = std::make_unique(test_setup->m_node.chain.get(), "", CreateMockableWalletDatabase()); + wallet->chainStateFlushed(ChainstateRole::NORMAL, CBlockLocator{}); + LegacyDataSPKM* legacy_spkm = wallet->GetOrCreateLegacyDataSPKM(); // Add watch-only addresses std::vector scripts_watch_only; for (int w = 0; w < NUM_WATCH_ONLY_ADDR; ++w) { CKey key = GenerateRandomKey(); LOCK(wallet->cs_wallet); - const CScript& script = scripts_watch_only.emplace_back(GetScriptForDestination(GetDestinationForKey(key.GetPubKey(), OutputType::LEGACY))); - bool res = wallet->ImportScriptPubKeys(strprintf("watch_%d", w), {script}, - /*have_solving_data=*/false, /*apply_label=*/true, /*timestamp=*/1); - assert(res); + const auto& dest = GetDestinationForKey(key.GetPubKey(), OutputType::LEGACY); + const CScript& script = scripts_watch_only.emplace_back(GetScriptForDestination(dest)); + assert(legacy_spkm->LoadWatchOnly(script)); + assert(wallet->SetAddressBook(dest, strprintf("watch_%d", w), /*purpose=*/std::nullopt)); } // Generate transactions and local addresses - for (int j = 0; j < 400; ++j) { + for (int j = 0; j < 500; ++j) { + CKey key = GenerateRandomKey(); + CPubKey pubkey = key.GetPubKey(); + // Load key, scripts and create address book record + Assert(legacy_spkm->LoadKey(key, pubkey)); + CTxDestination dest{PKHash(pubkey)}; + Assert(wallet->SetAddressBook(dest, strprintf("legacy_%d", j), /*purpose=*/std::nullopt)); + CMutableTransaction mtx; - mtx.vout.emplace_back(COIN, GetScriptForDestination(*Assert(wallet->GetNewDestination(OutputType::BECH32, strprintf("bench_%d", j))))); - mtx.vout.emplace_back(COIN, GetScriptForDestination(*Assert(wallet->GetNewDestination(OutputType::LEGACY, strprintf("legacy_%d", j))))); + mtx.vout.emplace_back(COIN, 
GetScriptForDestination(dest)); mtx.vout.emplace_back(COIN, scripts_watch_only.at(j % NUM_WATCH_ONLY_ADDR)); mtx.vin.resize(2); wallet->AddToWallet(MakeTransactionRef(mtx), TxStateInactive{}, /*update_wtx=*/nullptr, /*fFlushOnClose=*/false, /*rescanning_old_block=*/true); } - // Unload so the migration process loads it - TestUnloadWallet(std::move(wallet)); - - bench.epochs(/*numEpochs=*/1).run([&] { - util::Result res = MigrateLegacyToDescriptor(fs::PathToString(test_setup->m_path_root / "legacy"), "", context); + bench.epochs(/*numEpochs=*/1).run([&context, &wallet] { + util::Result res = MigrateLegacyToDescriptor(std::move(wallet), /*passphrase=*/"", context, /*was_loaded=*/false); assert(res); assert(res->wallet); assert(res->watchonly_wallet); diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index d210e2c8ba..ceb3c99410 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -228,10 +228,10 @@ static bool AppInit(NodeContext& node) return InitError(Untranslated("-daemon is not supported on this operating system")); #endif // HAVE_DECL_FORK } - // Lock data directory after daemonization - if (!AppInitLockDataDirectory()) + // Lock critical directories after daemonization + if (!AppInitLockDirectories()) { - // If locking the data directory failed, exit immediately + // If locking a directory failed, exit immediately return false; } fRet = AppInitInterfaces(node) && AppInitMain(node); diff --git a/src/chain.cpp b/src/chain.cpp index fa82dca58e..272cbf6c81 100644 --- a/src/chain.cpp +++ b/src/chain.cpp @@ -26,7 +26,7 @@ CBlockHeader CBlockIndex::GetBlockHeader(const node::BlockManager& blockman) con have to read the actual *header*, not the full block. 
*/ if (block.IsAuxpow()) { - blockman.ReadBlockHeaderFromDisk(block, *this); + blockman.ReadBlockHeader(block, *this); return block; } diff --git a/src/index/base.cpp b/src/index/base.cpp index a8f9073d9f..1169a1c86b 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -188,7 +188,7 @@ void BaseIndex::Sync() CBlock block; interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex); - if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *pindex)) { + if (!m_chainstate->m_blockman.ReadBlock(block, *pindex)) { FatalErrorf("%s: Failed to read block %s from disk", __func__, pindex->GetBlockHash().ToString()); return; @@ -256,7 +256,7 @@ bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_ti // In the case of a reorg, ensure persisted block locator is not stale. // Pruning has a minimum of 288 blocks-to-keep and getting the index // out of sync may be possible but a users fault. - // In case we reorg beyond the pruned depth, ReadBlockFromDisk would + // In case we reorg beyond the pruned depth, ReadBlock would // throw and lead to a graceful shutdown SetBestBlockIndex(new_tip); if (!Commit()) { diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp index a808cc9085..5ce85e1f84 100644 --- a/src/index/blockfilterindex.cpp +++ b/src/index/blockfilterindex.cpp @@ -256,7 +256,7 @@ bool BlockFilterIndex::CustomAppend(const interfaces::BlockInfo& block) // pindex variable gives indexing code access to node internals. 
It // will be removed in upcoming commit const CBlockIndex* pindex = WITH_LOCK(cs_main, return m_chainstate->m_blockman.LookupBlockIndex(block.hash)); - if (!m_chainstate->m_blockman.UndoReadFromDisk(block_undo, *pindex)) { + if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) { return false; } } diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp index c950a18f3f..b5869416b9 100644 --- a/src/index/coinstatsindex.cpp +++ b/src/index/coinstatsindex.cpp @@ -123,7 +123,7 @@ bool CoinStatsIndex::CustomAppend(const interfaces::BlockInfo& block) // pindex variable gives indexing code access to node internals. It // will be removed in upcoming commit const CBlockIndex* pindex = WITH_LOCK(cs_main, return m_chainstate->m_blockman.LookupBlockIndex(block.hash)); - if (!m_chainstate->m_blockman.UndoReadFromDisk(block_undo, *pindex)) { + if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) { return false; } @@ -287,7 +287,7 @@ bool CoinStatsIndex::CustomRewind(const interfaces::BlockRef& current_tip, const do { CBlock block; - if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *iter_tip)) { + if (!m_chainstate->m_blockman.ReadBlock(block, *iter_tip)) { LogError("%s: Failed to read block %s from disk\n", __func__, iter_tip->GetBlockHash().ToString()); return false; @@ -415,7 +415,7 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex // Ignore genesis block if (pindex->nHeight > 0) { - if (!m_chainstate->m_blockman.UndoReadFromDisk(block_undo, *pindex)) { + if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) { return false; } diff --git a/src/init.cpp b/src/init.cpp index 116823c36f..09d9de4edc 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1072,19 +1072,23 @@ bool AppInitParameterInteraction(const ArgsManager& args) return true; } -static bool LockDataDirectory(bool probeOnly) +static bool LockDirectory(const fs::path& dir, bool probeOnly) { - // Make sure only a single Bitcoin 
process is using the data directory. - const fs::path& datadir = gArgs.GetDataDirNet(); - switch (util::LockDirectory(datadir, ".lock", probeOnly)) { + // Make sure only a single process is using the directory. + switch (util::LockDirectory(dir, ".lock", probeOnly)) { case util::LockResult::ErrorWrite: - return InitError(strprintf(_("Cannot write to data directory '%s'; check permissions."), fs::PathToString(datadir))); + return InitError(strprintf(_("Cannot write to directory '%s'; check permissions."), fs::PathToString(dir))); case util::LockResult::ErrorLock: - return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running."), fs::PathToString(datadir), CLIENT_NAME)); + return InitError(strprintf(_("Cannot obtain a lock on directory %s. %s is probably already running."), fs::PathToString(dir), CLIENT_NAME)); case util::LockResult::Success: return true; } // no default case, so the compiler can warn about missing cases assert(false); } +static bool LockDirectories(bool probeOnly) +{ + return LockDirectory(gArgs.GetDataDirNet(), probeOnly) && \ + LockDirectory(gArgs.GetBlocksDirPath(), probeOnly); +} bool AppInitSanityChecks(const kernel::Context& kernel) { @@ -1099,19 +1103,19 @@ bool AppInitSanityChecks(const kernel::Context& kernel) return InitError(strprintf(_("Elliptic curve cryptography sanity check failure. %s is shutting down."), CLIENT_NAME)); } - // Probe the data directory lock to give an early error message, if possible - // We cannot hold the data directory lock here, as the forking for daemon() hasn't yet happened, - // and a fork will cause weird behavior to it. - return LockDataDirectory(true); + // Probe the directory locks to give an early error message, if possible + // We cannot hold the directory locks here, as the forking for daemon() hasn't yet happened, + // and a fork will cause weird behavior to them. 
+ return LockDirectories(true); } -bool AppInitLockDataDirectory() +bool AppInitLockDirectories() { - // After daemonization get the data directory lock again and hold on to it until exit + // After daemonization get the directory locks again and hold on to them until exit // This creates a slight window for a race condition to happen, however this condition is harmless: it // will at most make us exit without printing a message to console. - if (!LockDataDirectory(false)) { - // Detailed error printed inside LockDataDirectory + if (!LockDirectories(false)) { + // Detailed error printed inside LockDirectories + return false; } return true; } @@ -1587,7 +1591,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) g_zmq_notification_interface = CZMQNotificationInterface::Create( [&chainman = node.chainman](std::vector& block, const CBlockIndex& index) { assert(chainman); - return chainman->m_blockman.ReadRawBlockFromDisk(block, WITH_LOCK(cs_main, return index.GetBlockPos())); + return chainman->m_blockman.ReadRawBlock(block, WITH_LOCK(cs_main, return index.GetBlockPos())); }); if (g_zmq_notification_interface) { diff --git a/src/init.h b/src/init.h index 6d8a35d80e..6b60a4e147 100644 --- a/src/init.h +++ b/src/init.h @@ -55,11 +55,11 @@ bool AppInitParameterInteraction(const ArgsManager& args); */ bool AppInitSanityChecks(const kernel::Context& kernel); /** - * Lock bitcoin core data directory. + * Lock bitcoin core critical directories. * @note This should only be done after daemonization. Do not call Shutdown() if this function fails. * @pre Parameters should be parsed and config file should be read, AppInitSanityChecks should have been called. */ -bool AppInitLockDataDirectory(); +bool AppInitLockDirectories(); /** * Initialize node and wallet interface pointers. Has no prerequisites or side effects besides allocating memory. */ @@ -67,7 +67,7 @@ bool AppInitInterfaces(node::NodeContext& node); /** * Bitcoin core main initialization. 
* @note This should only be done after daemonization. Call Shutdown() if this function fails. - * @pre Parameters should be parsed and config file should be read, AppInitLockDataDirectory should have been called. + * @pre Parameters should be parsed and config file should be read, AppInitLockDirectories should have been called. */ bool AppInitMain(node::NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info = nullptr); diff --git a/src/leveldb/CMakeLists.txt b/src/leveldb/CMakeLists.txt index 1cb46256c2..cfd4faa325 100644 --- a/src/leveldb/CMakeLists.txt +++ b/src/leveldb/CMakeLists.txt @@ -28,9 +28,6 @@ option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON) option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON) option(LEVELDB_INSTALL "Install LevelDB's header and library" ON) -include(TestBigEndian) -test_big_endian(LEVELDB_IS_BIG_ENDIAN) - include(CheckIncludeFile) check_include_file("unistd.h" HAVE_UNISTD_H) diff --git a/src/leveldb/include/leveldb/slice.h b/src/leveldb/include/leveldb/slice.h index 2df417dc31..44de9038c8 100644 --- a/src/leveldb/include/leveldb/slice.h +++ b/src/leveldb/include/leveldb/slice.h @@ -52,6 +52,9 @@ class LEVELDB_EXPORT Slice { // Return true iff the length of the referenced data is zero bool empty() const { return size_ == 0; } + const char* begin() const { return data(); } + const char* end() const { return data() + size(); } + // Return the ith byte in the referenced data. // REQUIRES: n < size() char operator[](size_t n) const { diff --git a/src/leveldb/port/port_config.h.in b/src/leveldb/port/port_config.h.in index 21273153a3..272671d39f 100644 --- a/src/leveldb/port/port_config.h.in +++ b/src/leveldb/port/port_config.h.in @@ -30,10 +30,4 @@ #cmakedefine01 HAVE_SNAPPY #endif // !defined(HAVE_SNAPPY) -// Define to 1 if your processor stores words with the most significant byte -// first (like Motorola and SPARC, unlike Intel and VAX). 
-#if !defined(LEVELDB_IS_BIG_ENDIAN) -#cmakedefine01 LEVELDB_IS_BIG_ENDIAN -#endif // !defined(LEVELDB_IS_BIG_ENDIAN) - #endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ \ No newline at end of file diff --git a/src/leveldb/port/port_example.h b/src/leveldb/port/port_example.h index 1a8fca24b3..a665910d95 100644 --- a/src/leveldb/port/port_example.h +++ b/src/leveldb/port/port_example.h @@ -18,10 +18,6 @@ namespace port { // TODO(jorlow): Many of these belong more in the environment class rather than // here. We should try moving them and see if it affects perf. -// The following boolean constant must be true on a little-endian machine -// and false otherwise. -static const bool kLittleEndian = true /* or some other expression */; - // ------------------ Threading ------------------- // A Mutex represents an exclusive lock. diff --git a/src/leveldb/port/port_stdcxx.h b/src/leveldb/port/port_stdcxx.h index e9cb0e53af..2bda48db42 100644 --- a/src/leveldb/port/port_stdcxx.h +++ b/src/leveldb/port/port_stdcxx.h @@ -41,8 +41,6 @@ namespace leveldb { namespace port { -static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN; - class CondVar; // Thinly wraps std::mutex. diff --git a/src/leveldb/util/coding.h b/src/leveldb/util/coding.h index 1983ae7173..f0bb57b8e4 100644 --- a/src/leveldb/util/coding.h +++ b/src/leveldb/util/coding.h @@ -48,29 +48,13 @@ int VarintLength(uint64_t v); char* EncodeVarint32(char* dst, uint32_t value); char* EncodeVarint64(char* dst, uint64_t value); -// TODO(costan): Remove port::kLittleEndian and the fast paths based on -// std::memcpy when clang learns to optimize the generic code, as -// described in https://bugs.llvm.org/show_bug.cgi?id=41761 -// -// The platform-independent code in DecodeFixed{32,64}() gets optimized to mov -// on x86 and ldr on ARM64, by both clang and gcc. However, only gcc optimizes -// the platform-independent code in EncodeFixed{32,64}() to mov / str. - // Lower-level versions of Put... 
that write directly into a character buffer // REQUIRES: dst has enough space for the value being written inline void EncodeFixed32(char* dst, uint32_t value) { uint8_t* const buffer = reinterpret_cast(dst); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / str (ARM) instruction. - std::memcpy(buffer, &value, sizeof(uint32_t)); - return; - } - - // Platform-independent code. - // Currently, only gcc optimizes this to a single mov / str instruction. + // Recent clang and gcc optimize this to a single mov / str instruction. buffer[0] = static_cast(value); buffer[1] = static_cast(value >> 8); buffer[2] = static_cast(value >> 16); @@ -80,15 +64,7 @@ inline void EncodeFixed32(char* dst, uint32_t value) { inline void EncodeFixed64(char* dst, uint64_t value) { uint8_t* const buffer = reinterpret_cast(dst); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / str (ARM) instruction. - std::memcpy(buffer, &value, sizeof(uint64_t)); - return; - } - - // Platform-independent code. - // Currently, only gcc optimizes this to a single mov / str instruction. + // Recent clang and gcc optimize this to a single mov / str instruction. buffer[0] = static_cast(value); buffer[1] = static_cast(value >> 8); buffer[2] = static_cast(value >> 16); @@ -105,16 +81,7 @@ inline void EncodeFixed64(char* dst, uint64_t value) { inline uint32_t DecodeFixed32(const char* ptr) { const uint8_t* const buffer = reinterpret_cast(ptr); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / ldr (ARM) instruction. - uint32_t result; - std::memcpy(&result, buffer, sizeof(uint32_t)); - return result; - } - - // Platform-independent code. - // Clang and gcc optimize this to a single mov / ldr instruction. 
+ // Recent clang and gcc optimize this to a single mov / ldr instruction. return (static_cast(buffer[0])) | (static_cast(buffer[1]) << 8) | (static_cast(buffer[2]) << 16) | @@ -124,16 +91,7 @@ inline uint32_t DecodeFixed32(const char* ptr) { inline uint64_t DecodeFixed64(const char* ptr) { const uint8_t* const buffer = reinterpret_cast(ptr); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / ldr (ARM) instruction. - uint64_t result; - std::memcpy(&result, buffer, sizeof(uint64_t)); - return result; - } - - // Platform-independent code. - // Clang and gcc optimize this to a single mov / ldr instruction. + // Recent clang and gcc optimize this to a single mov / ldr instruction. return (static_cast(buffer[0])) | (static_cast(buffer[1]) << 8) | (static_cast(buffer[2]) << 16) | diff --git a/src/leveldb/util/hash.cc b/src/leveldb/util/hash.cc index dd47c110ee..5432b6180d 100644 --- a/src/leveldb/util/hash.cc +++ b/src/leveldb/util/hash.cc @@ -27,7 +27,7 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) { uint32_t h = seed ^ (n * m); // Pick up four bytes at a time - while (data + 4 <= limit) { + while (limit - data >= 4) { uint32_t w = DecodeFixed32(data); data += 4; h += w; diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 0280d0bf47..68e11c807b 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2272,7 +2272,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& // Fast-path: in this case it is possible to serve the block directly from disk, // as the network format matches the format on disk std::vector block_data; - if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data, block_pos)) { + if (!m_chainman.m_blockman.ReadRawBlock(block_data, block_pos)) { if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) { LogDebug(BCLog::NET, "Block was pruned before it could be read, 
%s\n", pfrom.DisconnectMsg(fLogIPs)); } else { @@ -2286,7 +2286,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& } else { // Send block from disk std::shared_ptr pblockRead = std::make_shared(); - if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, block_pos)) { + if (!m_chainman.m_blockman.ReadBlock(*pblockRead, block_pos)) { if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) { LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs)); } else { @@ -4127,7 +4127,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (!block_pos.IsNull()) { CBlock block; - const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, block_pos)}; + const bool ret{m_chainman.m_blockman.ReadBlock(block, block_pos)}; // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get // pruned after we release cs_main above, so this read should never fail. 
assert(ret); @@ -5661,7 +5661,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) PushMessage(*pto, std::move(cached_cmpctblock_msg.value())); } else { CBlock block; - const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pBestIndex)}; + const bool ret{m_chainman.m_blockman.ReadBlock(block, *pBestIndex)}; assert(ret); CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()}; MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK, cmpctblock); diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index dd404d0cff..bf7fd5f542 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -669,41 +669,14 @@ CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n) return &m_blockfile_info.at(n); } -bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const -{ - // Open history file to append - AutoFile fileout{OpenUndoFile(pos)}; - if (fileout.IsNull()) { - LogError("%s: OpenUndoFile failed\n", __func__); - return false; - } - - // Write index header - unsigned int nSize = GetSerializeSize(blockundo); - fileout << GetParams().MessageStart() << nSize; - - // Write undo data - long fileOutPos = fileout.tell(); - pos.nPos = (unsigned int)fileOutPos; - fileout << blockundo; - - // calculate & write checksum - HashWriter hasher{}; - hasher << hashBlock; - hasher << blockundo; - fileout << hasher.GetHash(); - - return true; -} - -bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& index) const +bool BlockManager::ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const { const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())}; // Open history file to read AutoFile filein{OpenUndoFile(pos, true)}; if (filein.IsNull()) { - LogError("%s: OpenUndoFile failed for %s\n", __func__, pos.ToString()); + LogError("OpenUndoFile failed for %s", pos.ToString()); return false; } @@ -963,28 +936,7 @@ bool BlockManager::FindUndoPos(BlockValidationState& 
state, int nFile, FlatFileP return true; } -bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const -{ - // Open history file to append - AutoFile fileout{OpenBlockFile(pos)}; - if (fileout.IsNull()) { - LogError("%s: OpenBlockFile failed\n", __func__); - return false; - } - - // Write index header - unsigned int nSize = GetSerializeSize(TX_WITH_WITNESS(block)); - fileout << GetParams().MessageStart() << nSize; - - // Write block - long fileOutPos = fileout.tell(); - pos.nPos = (unsigned int)fileOutPos; - fileout << TX_WITH_WITNESS(block); - - return true; -} - -bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) +bool BlockManager::WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) { AssertLockHeld(::cs_main); const BlockfileType type = BlockfileTypeForHeight(block.nHeight); @@ -992,33 +944,50 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid // Write undo information to disk if (block.GetUndoPos().IsNull()) { - FlatFilePos _pos; - if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo) + 40)) { - LogError("%s: FindUndoPos failed\n", __func__); + FlatFilePos pos; + const unsigned int blockundo_size{static_cast(GetSerializeSize(blockundo))}; + if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) { + LogError("FindUndoPos failed"); return false; } - if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash())) { + // Open history file to append + AutoFile fileout{OpenUndoFile(pos)}; + if (fileout.IsNull()) { + LogError("OpenUndoFile failed"); return FatalError(m_opts.notifications, state, _("Failed to write undo data.")); } + + // Write index header + fileout << GetParams().MessageStart() << blockundo_size; + // Write undo data + pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE; + fileout << blockundo; + + // Calculate & write checksum + HashWriter 
hasher{}; + hasher << block.pprev->GetBlockHash(); + hasher << blockundo; + fileout << hasher.GetHash(); + // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order) // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height // in the block file info as below; note that this does not catch the case where the undo writes are keeping up // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in // the FindNextBlockPos function - if (_pos.nFile < cursor.file_num && static_cast(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) { + if (pos.nFile < cursor.file_num && static_cast(block.nHeight) == m_blockfile_info[pos.nFile].nHeightLast) { // Do not propagate the return code, a failed flush here should not // be an indication for a failed write. If it were propagated here, // the caller would assume the undo data not to be written, when in // fact it is. Note though, that a failed flush might leave the data // file untrimmed. 
- if (!FlushUndoFile(_pos.nFile, true)) { - LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", _pos.nFile); + if (!FlushUndoFile(pos.nFile, true)) { + LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", pos.nFile); } - } else if (_pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) { + } else if (pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) { cursor.undo_height = block.nHeight; } // update nUndoPos in block index - block.nUndoPos = _pos.nPos; + block.nUndoPos = pos.nPos; block.nStatus |= BLOCK_HAVE_UNDO; m_dirty_blockindex.insert(&block); } @@ -1084,22 +1053,22 @@ bool ReadBlockOrHeader(T& block, const CBlockIndex& index, const BlockManager& b } // anonymous namespace -bool BlockManager::ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) const +bool BlockManager::ReadBlock(CBlock& block, const FlatFilePos& pos) const { return ReadBlockOrHeader(block, pos, *this); } -bool BlockManager::ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) const +bool BlockManager::ReadBlock(CBlock& block, const CBlockIndex& index) const { return ReadBlockOrHeader(block, index, *this); } -bool BlockManager::ReadBlockHeaderFromDisk(CBlockHeader& block, const CBlockIndex& index) const +bool BlockManager::ReadBlockHeader(CBlockHeader& block, const CBlockIndex& index) const { return ReadBlockOrHeader(block, index, *this); } -bool BlockManager::ReadRawBlockFromDisk(std::vector& block, const FlatFilePos& pos) const +bool BlockManager::ReadRawBlock(std::vector& block, const FlatFilePos& pos) const { FlatFilePos hpos = pos; // If nPos is less than 8 the pos is null and we don't have the block data @@ -1144,22 +1113,27 @@ bool BlockManager::ReadRawBlockFromDisk(std::vector& block, const FlatF return true; } -FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight) +FlatFilePos BlockManager::WriteBlock(const CBlock& block, int nHeight) { - 
unsigned int nBlockSize = ::GetSerializeSize(TX_WITH_WITNESS(block)); - // Account for the 4 magic message start bytes + the 4 length bytes (8 bytes total, - // defined as BLOCK_SERIALIZATION_HEADER_SIZE) - nBlockSize += static_cast(BLOCK_SERIALIZATION_HEADER_SIZE); - FlatFilePos blockPos{FindNextBlockPos(nBlockSize, nHeight, block.GetBlockTime())}; - if (blockPos.IsNull()) { - LogError("%s: FindNextBlockPos failed\n", __func__); + const unsigned int block_size{static_cast(GetSerializeSize(TX_WITH_WITNESS(block)))}; + FlatFilePos pos{FindNextBlockPos(block_size + BLOCK_SERIALIZATION_HEADER_SIZE, nHeight, block.GetBlockTime())}; + if (pos.IsNull()) { + LogError("FindNextBlockPos failed"); return FlatFilePos(); } - if (!WriteBlockToDisk(block, blockPos)) { + AutoFile fileout{OpenBlockFile(pos)}; + if (fileout.IsNull()) { + LogError("OpenBlockFile failed"); m_opts.notifications.fatalError(_("Failed to write block.")); return FlatFilePos(); } - return blockPos; + + // Write index header + fileout << GetParams().MessageStart() << block_size; + // Write block + pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE; + fileout << TX_WITH_WITNESS(block); + return pos; } static auto InitBlocksdirXorKey(const BlockManager::Options& opts) @@ -1168,7 +1142,19 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) // size of the XOR-key file. std::array xor_key{}; - if (opts.use_xor && fs::is_empty(opts.blocks_dir)) { + // Consider this to be the first run if the blocksdir contains only hidden + // files (those which start with a .). Checking for a fully-empty dir would + // be too aggressive as a .lock file may have already been written. 
+ bool first_run = true; + for (const auto& entry : fs::directory_iterator(opts.blocks_dir)) { + const std::string path = fs::PathToString(entry.path().filename()); + if (!entry.is_regular_file() || !path.starts_with('.')) { + first_run = false; + break; + } + } + + if (opts.use_xor && first_run) { // Only use random fresh key when the boolean option is set and on the // very first start of the program. FastRandomContext{}.fillrand(xor_key); diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index c8d6b7658d..b3e25f5275 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -74,8 +74,11 @@ static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB /** The maximum size of a blk?????.dat file (since 0.8) */ static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB -/** Size of header written by WriteBlockToDisk before a serialized CBlock */ -static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE = std::tuple_size_v + sizeof(unsigned int); +/** Size of header written by WriteBlock before a serialized CBlock (8 bytes) */ +static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE{std::tuple_size_v + sizeof(unsigned int)}; + +/** Total overhead when writing undo data: header (8 bytes) plus checksum (32 bytes) */ +static constexpr size_t UNDO_DATA_DISK_OVERHEAD{BLOCK_SERIALIZATION_HEADER_SIZE + uint256::size()}; // Because validation code takes pointers to the map's CBlockIndex objects, if // we ever switch to another associative container, we need to either use a @@ -165,7 +168,7 @@ class BlockManager * blockfile info, and checks if there is enough disk space to save the block. * * The nAddSize argument passed to this function should include not just the size of the serialized CBlock, but also the size of - * separator fields which are written before it by WriteBlockToDisk (BLOCK_SERIALIZATION_HEADER_SIZE). + * separator fields (BLOCK_SERIALIZATION_HEADER_SIZE). 
*/ [[nodiscard]] FlatFilePos FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime); [[nodiscard]] bool FlushChainstateBlockFile(int tip_height); @@ -173,15 +176,6 @@ class BlockManager AutoFile OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false) const; - /** - * Write a block to disk. The pos argument passed to this function is modified by this call. Before this call, it should - * point to an unused file location where separator fields will be written, followed by the serialized CBlock data. - * After this call, it will point to the beginning of the serialized CBlock data, after the separator fields - * (BLOCK_SERIALIZATION_HEADER_SIZE) - */ - bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const; - bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const; - /* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */ void FindFilesToPruneManual( std::set& setFilesToPrune, @@ -334,7 +328,7 @@ class BlockManager /** Get block file info entry for one block file */ CBlockFileInfo* GetBlockFileInfo(size_t n); - bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) + bool WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** Store block on disk and update block file statistics. @@ -345,14 +339,13 @@ class BlockManager * @returns in case of success, the position to which the block was written to * in case of an error, an empty FlatFilePos */ - FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight); + FlatFilePos WriteBlock(const CBlock& block, int nHeight); /** Update blockfile info while processing a block during reindex. The block must be available on disk. 
* * @param[in] block the block being processed * @param[in] nHeight the height of the block - * @param[in] pos the position of the serialized CBlock on disk. This is the position returned - * by WriteBlockToDisk pointing at the CBlock, not the separator fields before it + * @param[in] pos the position of the serialized CBlock on disk */ void UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos); @@ -425,12 +418,12 @@ class BlockManager void UnlinkPrunedFiles(const std::set& setFilesToPrune) const; /** Functions for disk access for blocks */ - bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) const; - bool ReadBlockFromDisk(CBlock& block, const CBlockIndex& pindex) const; - bool ReadRawBlockFromDisk(std::vector& block, const FlatFilePos& pos) const; - bool ReadBlockHeaderFromDisk(CBlockHeader& block, const CBlockIndex& pindex) const; + bool ReadBlock(CBlock& block, const FlatFilePos& pos) const; + bool ReadBlock(CBlock& block, const CBlockIndex& pindex) const; + bool ReadRawBlock(std::vector& block, const FlatFilePos& pos) const; + bool ReadBlockHeader(CBlockHeader& block, const CBlockIndex& pindex) const; - bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& index) const; + bool ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const; void CleanupBlockRevFiles() const; }; diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index f3b8c6a072..7ae2ff6453 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -116,7 +116,7 @@ class NodeImpl : public Node m_context->ecc_context = std::make_unique(); if (!AppInitSanityChecks(*m_context->kernel)) return false; - if (!AppInitLockDataDirectory()) return false; + if (!AppInitLockDirectories()) return false; if (!AppInitInterfaces(*m_context)) return false; return true; @@ -442,7 +442,7 @@ bool FillBlock(const CBlockIndex* index, const FoundBlock& block, UniqueLocknHeight] == index ? 
active[index->nHeight + 1] : nullptr, *block.m_next_block, lock, active, blockman); if (block.m_data) { REVERSE_LOCK(lock); - if (!blockman.ReadBlockFromDisk(*block.m_data, *index)) block.m_data->SetNull(); + if (!blockman.ReadBlock(*block.m_data, *index)) block.m_data->SetNull(); } block.found = true; return true; diff --git a/src/node/miner.cpp b/src/node/miner.cpp index 1ea976b479..d638866241 100644 --- a/src/node/miner.cpp +++ b/src/node/miner.cpp @@ -33,12 +33,10 @@ int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParam int64_t nOldTime = pblock->nTime; int64_t nNewTime{std::max(pindexPrev->GetMedianTimePast() + 1, TicksSinceEpoch(NodeClock::now()))}; - if (consensusParams.enforce_BIP94) { - // Height of block to be mined. - const int height{pindexPrev->nHeight + 1}; - if (height % consensusParams.DifficultyAdjustmentInterval() == 0) { - nNewTime = std::max(nNewTime, pindexPrev->GetBlockTime() - MAX_TIMEWARP); - } + // Height of block to be mined. + const int height{pindexPrev->nHeight + 1}; + if (height % consensusParams.DifficultyAdjustmentInterval() == 0) { + nNewTime = std::max(nNewTime, pindexPrev->GetBlockTime() - MAX_TIMEWARP); } if (nOldTime < nNewTime) { diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp index 0f45da45db..666597391e 100644 --- a/src/node/transaction.cpp +++ b/src/node/transaction.cpp @@ -144,7 +144,7 @@ CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMe } if (block_index) { CBlock block; - if (blockman.ReadBlockFromDisk(block, *block_index)) { + if (blockman.ReadBlock(block, *block_index)) { for (const auto& tx : block.vtx) { if (tx->GetHash() == hash) { hashBlock = block_index->GetBlockHash(); diff --git a/src/pow.cpp b/src/pow.cpp index bbcf39b593..686b177fe3 100644 --- a/src/pow.cpp +++ b/src/pow.cpp @@ -143,7 +143,7 @@ bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params& return CheckProofOfWorkImpl(hash, nBits, params); } -bool 
CheckProofOfWorkImpl(uint256 hash, unsigned int nBits, const Consensus::Params& params) +std::optional DeriveTarget(unsigned int nBits, const uint256 pow_limit) { bool fNegative; bool fOverflow; @@ -152,8 +152,16 @@ bool CheckProofOfWorkImpl(uint256 hash, unsigned int nBits, const Consensus::Par bnTarget.SetCompact(nBits, &fNegative, &fOverflow); // Check range - if (fNegative || bnTarget == 0 || fOverflow || bnTarget > UintToArith256(params.powLimit)) - return false; + if (fNegative || bnTarget == 0 || fOverflow || bnTarget > UintToArith256(pow_limit)) + return {}; + + return bnTarget; +} + +bool CheckProofOfWorkImpl(uint256 hash, unsigned int nBits, const Consensus::Params& params) +{ + auto bnTarget{DeriveTarget(nBits, params.powLimit)}; + if (!bnTarget) return false; // Check proof of work matches claimed amount if (UintToArith256(hash) > bnTarget) diff --git a/src/pow.h b/src/pow.h index 2b28ade273..ceba55d36a 100644 --- a/src/pow.h +++ b/src/pow.h @@ -13,6 +13,18 @@ class CBlockHeader; class CBlockIndex; class uint256; +class arith_uint256; + +/** + * Convert nBits value to target. 
+ * + * @param[in] nBits compact representation of the target + * @param[in] pow_limit PoW limit (consensus parameter) + * + * @return the proof-of-work target or nullopt if the nBits value + * is invalid (due to overflow or exceeding pow_limit) + */ +std::optional DeriveTarget(unsigned int nBits, const uint256 pow_limit); unsigned int GetNextWorkRequired(const CBlockIndex* pindexLast, const CBlockHeader *pblock, const Consensus::Params&); unsigned int CalculateNextWorkRequired(const CBlockIndex* pindexLast, int64_t nFirstBlockTime, const Consensus::Params&); diff --git a/src/rest.cpp b/src/rest.cpp index 29faf2b05d..f6acca3ac1 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -225,12 +225,11 @@ static bool rest_headers(const std::any& context, const CBlockIndex* tip = nullptr; std::vector headers; headers.reserve(*parsed_count); - const node::BlockManager* blockman = nullptr; + ChainstateManager* maybe_chainman = GetChainman(context, req); + if (!maybe_chainman) return false; + ChainstateManager& chainman = *maybe_chainman; + const node::BlockManager& blockman = chainman.m_blockman; { - ChainstateManager* maybe_chainman = GetChainman(context, req); - if (!maybe_chainman) return false; - ChainstateManager& chainman = *maybe_chainman; - blockman = &chainman.m_blockman; LOCK(cs_main); CChain& active_chain = chainman.ActiveChain(); tip = active_chain.Tip(); @@ -248,7 +247,7 @@ static bool rest_headers(const std::any& context, case RESTResponseFormat::BINARY: { DataStream ssHeader{}; for (const CBlockIndex *pindex : headers) { - ssHeader << pindex->GetBlockHeader(*blockman); + ssHeader << pindex->GetBlockHeader(blockman); } req->WriteHeader("Content-Type", "application/octet-stream"); @@ -259,7 +258,7 @@ static bool rest_headers(const std::any& context, case RESTResponseFormat::HEX: { DataStream ssHeader{}; for (const CBlockIndex *pindex : headers) { - ssHeader << pindex->GetBlockHeader(*blockman); + ssHeader << pindex->GetBlockHeader(blockman); } std::string strHex = 
HexStr(ssHeader) + "\n"; @@ -270,7 +269,7 @@ static bool rest_headers(const std::any& context, case RESTResponseFormat::JSON: { UniValue jsonHeaders(UniValue::VARR); for (const CBlockIndex *pindex : headers) { - jsonHeaders.push_back(blockheaderToJSON(*blockman, *tip, *pindex)); + jsonHeaders.push_back(blockheaderToJSON(blockman, *tip, *pindex, chainman.GetConsensus().powLimit)); } std::string strJSON = jsonHeaders.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); @@ -321,7 +320,7 @@ static bool rest_block(const std::any& context, } std::vector block_data{}; - if (!chainman.m_blockman.ReadRawBlockFromDisk(block_data, pos)) { + if (!chainman.m_blockman.ReadRawBlock(block_data, pos)) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } @@ -343,7 +342,7 @@ static bool rest_block(const std::any& context, CBlock block{}; DataStream block_stream{block_data}; block_stream >> TX_WITH_WITNESS(block); - UniValue objBlock = blockToJSON(chainman.m_blockman, block, *tip, *pblockindex, tx_verbosity); + UniValue objBlock = blockToJSON(chainman.m_blockman, block, *tip, *pblockindex, tx_verbosity, chainman.GetConsensus().powLimit); std::string strJSON = objBlock.write() + "\n"; req->WriteHeader("Content-Type", "application/json"); req->WriteReply(HTTP_OK, strJSON); diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 035d031034..4f04dfeb05 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -184,7 +184,7 @@ static UniValue blockheaderToJSON(const CPureBlockHeader& header) return result; } -UniValue blockheaderToJSON(const BlockManager& blockman, const CBlockIndex& tip, const CBlockIndex& blockindex) +UniValue blockheaderToJSON(const BlockManager& blockman, const CBlockIndex& tip, const CBlockIndex& blockindex, const uint256 pow_limit) { // Serialize passed information without accessing chain state of the active chain! 
AssertLockNotHeld(cs_main); // For performance reasons @@ -195,7 +195,8 @@ UniValue blockheaderToJSON(const BlockManager& blockman, const CBlockIndex& tip, int confirmations = ComputeNextBlockAndDepth(tip, blockindex, pnext); result.pushKV("confirmations", confirmations); result.pushKV("height", blockindex.nHeight); - result.pushKV("mediantime", (int64_t)blockindex.GetMedianTimePast()); + result.pushKV("mediantime", blockindex.GetMedianTimePast()); + result.pushKV("target", GetTarget(tip, pow_limit).GetHex()); result.pushKV("chainwork", blockindex.nChainWork.GetHex()); result.pushKV("nTx", (uint64_t)blockindex.nTx); @@ -204,9 +205,9 @@ UniValue blockheaderToJSON(const BlockManager& blockman, const CBlockIndex& tip, return result; } -UniValue blockToJSON(BlockManager& blockman, const CBlock& block, const CBlockIndex& tip, const CBlockIndex& blockindex, TxVerbosity verbosity) +UniValue blockToJSON(BlockManager& blockman, const CBlock& block, const CBlockIndex& tip, const CBlockIndex& blockindex, TxVerbosity verbosity, const uint256 pow_limit) { - UniValue result = blockheaderToJSON(blockman, tip, blockindex); + UniValue result = blockheaderToJSON(blockman, tip, blockindex, pow_limit); result.pushKV("strippedsize", (int)::GetSerializeSize(TX_NO_WITNESS(block))); result.pushKV("size", (int)::GetSerializeSize(TX_WITH_WITNESS(block))); @@ -225,7 +226,7 @@ UniValue blockToJSON(BlockManager& blockman, const CBlock& block, const CBlockIn CBlockUndo blockUndo; const bool is_not_pruned{WITH_LOCK(::cs_main, return !blockman.IsBlockPruned(blockindex))}; bool have_undo{is_not_pruned && WITH_LOCK(::cs_main, return blockindex.nStatus & BLOCK_HAVE_UNDO)}; - if (have_undo && !blockman.UndoReadFromDisk(blockUndo, blockindex)) { + if (have_undo && !blockman.ReadBlockUndo(blockUndo, blockindex)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Undo data expected but can't be read. 
This could be due to disk corruption or a conflict with a pruning event."); } for (size_t i = 0; i < block.vtx.size(); ++i) { @@ -622,7 +623,8 @@ static RPCHelpMan getblockheader() {RPCResult::Type::NUM_TIME, "time", "The block time expressed in " + UNIX_EPOCH_TIME}, {RPCResult::Type::NUM_TIME, "mediantime", "The median block time expressed in " + UNIX_EPOCH_TIME}, {RPCResult::Type::NUM, "nonce", "The nonce"}, - {RPCResult::Type::STR_HEX, "bits", "The bits"}, + {RPCResult::Type::STR_HEX, "bits", "nBits: compact representation of the block difficulty target"}, + {RPCResult::Type::STR_HEX, "target", "The difficulty target"}, {RPCResult::Type::NUM, "difficulty", "The difficulty"}, {RPCResult::Type::STR_HEX, "chainwork", "Expected number of hashes required to produce the current chain"}, {RPCResult::Type::NUM, "nTx", "The number of transactions in the block"}, @@ -669,7 +671,7 @@ static RPCHelpMan getblockheader() return strHex; } - auto result = blockheaderToJSON(chainman.m_blockman, *tip, *pblockindex); + auto result = blockheaderToJSON(chainman.m_blockman, *tip, *pblockindex, chainman.GetConsensus().powLimit); if (header.auxpow) result.pushKV("auxpow", AuxpowToJSON(*header.auxpow, fVerbose, chainman.ActiveChainstate())); @@ -701,7 +703,7 @@ static CBlock GetBlockChecked(BlockManager& blockman, const CBlockIndex& blockin CheckBlockDataAvailability(blockman, blockindex, /*check_for_undo=*/false); } - if (!blockman.ReadBlockFromDisk(block, blockindex)) { + if (!blockman.ReadBlock(block, blockindex)) { // Block not found on disk. This shouldn't normally happen unless the block was // pruned right after we released the lock above. throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk"); @@ -720,7 +722,7 @@ static std::vector GetRawBlockChecked(BlockManager& blockman, const CBl pos = blockindex.GetBlockPos(); } - if (!blockman.ReadRawBlockFromDisk(data, pos)) { + if (!blockman.ReadRawBlock(data, pos)) { // Block not found on disk. 
This shouldn't normally happen unless the block was // pruned right after we released the lock above. throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk"); @@ -741,7 +743,7 @@ static CBlockUndo GetUndoChecked(BlockManager& blockman, const CBlockIndex& bloc CheckBlockDataAvailability(blockman, blockindex, /*check_for_undo=*/true); } - if (!blockman.UndoReadFromDisk(blockUndo, blockindex)) { + if (!blockman.ReadBlockUndo(blockUndo, blockindex)) { throw JSONRPCError(RPC_MISC_ERROR, "Can't read undo data from disk"); } @@ -804,7 +806,8 @@ static RPCHelpMan getblock() {RPCResult::Type::NUM_TIME, "time", "The block time expressed in " + UNIX_EPOCH_TIME}, {RPCResult::Type::NUM_TIME, "mediantime", "The median block time expressed in " + UNIX_EPOCH_TIME}, {RPCResult::Type::NUM, "nonce", "The nonce"}, - {RPCResult::Type::STR_HEX, "bits", "The bits"}, + {RPCResult::Type::STR_HEX, "bits", "nBits: compact representation of the block difficulty target"}, + {RPCResult::Type::STR_HEX, "target", "The difficulty target"}, {RPCResult::Type::NUM, "difficulty", "The difficulty"}, {RPCResult::Type::STR_HEX, "chainwork", "Expected number of hashes required to produce the chain up to this block (in hex)"}, {RPCResult::Type::NUM, "nTx", "The number of transactions in the block"}, @@ -880,7 +883,7 @@ static RPCHelpMan getblock() tx_verbosity = TxVerbosity::SHOW_DETAILS_AND_PREVOUT; } - auto result = blockToJSON(chainman.m_blockman, block, *tip, *pblockindex, tx_verbosity); + auto result = blockToJSON(chainman.m_blockman, block, *tip, *pblockindex, tx_verbosity, chainman.GetConsensus().powLimit); if (block.auxpow) result.pushKV("auxpow", AuxpowToJSON(*block.auxpow, verbosity >= 1, chainman.ActiveChainstate())); @@ -1382,6 +1385,8 @@ RPCHelpMan getblockchaininfo() {RPCResult::Type::NUM, "blocks", "the height of the most-work fully-validated chain. 
The genesis block has height 0"}, {RPCResult::Type::NUM, "headers", "the current number of headers we have validated"}, {RPCResult::Type::STR, "bestblockhash", "the hash of the currently best block"}, + {RPCResult::Type::STR_HEX, "bits", "nBits: compact representation of the block difficulty target"}, + {RPCResult::Type::STR_HEX, "target", "The difficulty target"}, {RPCResult::Type::NUM, "difficulty", "the current difficulty"}, {RPCResult::Type::NUM_TIME, "time", "The block time expressed in " + UNIX_EPOCH_TIME}, {RPCResult::Type::NUM_TIME, "mediantime", "The median block time expressed in " + UNIX_EPOCH_TIME}, @@ -1420,7 +1425,9 @@ RPCHelpMan getblockchaininfo() obj.pushKV("blocks", height); obj.pushKV("headers", chainman.m_best_header ? chainman.m_best_header->nHeight : -1); obj.pushKV("bestblockhash", tip.GetBlockHash().GetHex()); + obj.pushKV("bits", strprintf("%08x", tip.nBits)); obj.pushKV("difficulty", GetDifficultyForBits(tip.nBits)); + obj.pushKV("target", GetTarget(tip, chainman.GetConsensus().powLimit).GetHex()); obj.pushKV("time", tip.GetBlockTime()); obj.pushKV("mediantime", tip.GetMedianTimePast()); obj.pushKV("verificationprogress", chainman.GuessVerificationProgress(&tip)); @@ -3388,6 +3395,8 @@ static RPCHelpMan loadtxoutset() const std::vector RPCHelpForChainstate{ {RPCResult::Type::NUM, "blocks", "number of blocks in this chainstate"}, {RPCResult::Type::STR_HEX, "bestblockhash", "blockhash of the tip"}, + {RPCResult::Type::STR_HEX, "bits", "nBits: compact representation of the block difficulty target"}, + {RPCResult::Type::STR_HEX, "target", "The difficulty target"}, {RPCResult::Type::NUM, "difficulty", "difficulty of the tip"}, {RPCResult::Type::NUM, "verificationprogress", "progress towards the network tip"}, {RPCResult::Type::STR_HEX, "snapshot_blockhash", /*optional=*/true, "the base block of the snapshot this chainstate is based on, if any"}, @@ -3430,6 +3439,8 @@ return RPCHelpMan{ data.pushKV("blocks", (int)chain.Height()); 
data.pushKV("bestblockhash", tip->GetBlockHash().GetHex()); + data.pushKV("bits", strprintf("%08x", tip->nBits)); + data.pushKV("target", GetTarget(*tip, chainman.GetConsensus().powLimit).GetHex()); data.pushKV("difficulty", GetDifficultyForBits(tip->nBits)); data.pushKV("verificationprogress", chainman.GuessVerificationProgress(tip)); data.pushKV("coins_db_cache_bytes", cs.m_coinsdb_cache_size_bytes); diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h index ac079acb81..21c2e16788 100644 --- a/src/rpc/blockchain.h +++ b/src/rpc/blockchain.h @@ -36,10 +36,10 @@ static constexpr int NUM_GETBLOCKSTATS_PERCENTILES = 5; double GetDifficultyForBits(uint32_t nBits); /** Block description to JSON */ -UniValue blockToJSON(node::BlockManager& blockman, const CBlock& block, const CBlockIndex& tip, const CBlockIndex& blockindex, TxVerbosity verbosity) LOCKS_EXCLUDED(cs_main); +UniValue blockToJSON(node::BlockManager& blockman, const CBlock& block, const CBlockIndex& tip, const CBlockIndex& blockindex, TxVerbosity verbosity, const uint256 pow_limit) LOCKS_EXCLUDED(cs_main); /** Block header to JSON */ -UniValue blockheaderToJSON(const node::BlockManager& blockman, const CBlockIndex& tip, const CBlockIndex& blockindex) LOCKS_EXCLUDED(cs_main); +UniValue blockheaderToJSON(const node::BlockManager& blockman, const CBlockIndex& tip, const CBlockIndex& blockindex, const uint256 pow_limit) LOCKS_EXCLUDED(cs_main); /** Used by getblockstats to get feerates at different percentiles by weight */ void CalculatePercentilesByWeight(CAmount result[NUM_GETBLOCKSTATS_PERCENTILES], std::vector>& scores, int64_t total_weight); diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index e3a2d2c81d..2ef59d933f 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -424,11 +424,20 @@ static RPCHelpMan getmininginfo() {RPCResult::Type::NUM, "blocks", "The current block"}, {RPCResult::Type::NUM, "currentblockweight", /*optional=*/true, "The block weight of the last assembled block (only 
present if a block was ever assembled)"}, {RPCResult::Type::NUM, "currentblocktx", /*optional=*/true, "The number of block transactions of the last assembled block (only present if a block was ever assembled)"}, + {RPCResult::Type::STR_HEX, "bits", "The current nBits, compact representation of the block difficulty target"}, {RPCResult::Type::NUM, "difficulty", "The current difficulty"}, + {RPCResult::Type::STR_HEX, "target", "The current target"}, {RPCResult::Type::NUM, "networkhashps", "The network hashes per second"}, {RPCResult::Type::NUM, "pooledtx", "The size of the mempool"}, {RPCResult::Type::STR, "chain", "current network name (" LIST_CHAIN_NAMES ")"}, {RPCResult::Type::STR_HEX, "signet_challenge", /*optional=*/true, "The block challenge (aka. block script), in hexadecimal (only present if the current network is a signet)"}, + {RPCResult::Type::OBJ, "next", "The next block", + { + {RPCResult::Type::NUM, "height", "The next height"}, + {RPCResult::Type::STR_HEX, "bits", "The next target nBits"}, + {RPCResult::Type::NUM, "difficulty", "The next difficulty"}, + {RPCResult::Type::STR_HEX, "target", "The next target"} + }}, (IsDeprecatedRPCEnabled("warnings") ? 
RPCResult{RPCResult::Type::STR, "warnings", "any network and blockchain warnings (DEPRECATED)"} : RPCResult{RPCResult::Type::ARR, "warnings", "any network and blockchain warnings (run with `-deprecatedrpc=warnings` to return the latest warning as a single string)", @@ -449,18 +458,32 @@ static RPCHelpMan getmininginfo() ChainstateManager& chainman = EnsureChainman(node); LOCK(cs_main); const CChain& active_chain = chainman.ActiveChain(); + CBlockIndex& tip{*CHECK_NONFATAL(active_chain.Tip())}; UniValue obj(UniValue::VOBJ); obj.pushKV("blocks", active_chain.Height()); if (BlockAssembler::m_last_block_weight) obj.pushKV("currentblockweight", *BlockAssembler::m_last_block_weight); if (BlockAssembler::m_last_block_num_txs) obj.pushKV("currentblocktx", *BlockAssembler::m_last_block_num_txs); - obj.pushKV("difficulty", (double)GetDifficultyForBits(CHECK_NONFATAL(active_chain.Tip())->nBits)); + obj.pushKV("bits", strprintf("%08x", tip.nBits)); + obj.pushKV("difficulty", (double)GetDifficultyForBits(tip.nBits)); + obj.pushKV("target", GetTarget(tip, chainman.GetConsensus().powLimit).GetHex()); obj.pushKV("networkhashps", getnetworkhashps().HandleRequest(request)); obj.pushKV("pooledtx", (uint64_t)mempool.size()); obj.pushKV("chain", chainman.GetParams().GetChainTypeString()); + + UniValue next(UniValue::VOBJ); + CBlockIndex next_index; + NextEmptyBlockIndex(tip, chainman.GetConsensus(), next_index); + + next.pushKV("height", next_index.nHeight); + next.pushKV("bits", strprintf("%08x", next_index.nBits)); + next.pushKV("difficulty", (double)GetDifficultyForBits(next_index.nBits)); + next.pushKV("target", GetTarget(next_index, chainman.GetConsensus().powLimit).GetHex()); + obj.pushKV("next", next); + if (chainman.GetParams().GetChainType() == ChainType::SIGNET) { const std::vector& signet_challenge = - chainman.GetParams().GetConsensus().signet_challenge; + chainman.GetConsensus().signet_challenge; obj.pushKV("signet_challenge", HexStr(signet_challenge)); } 
obj.pushKV("warnings", node::GetWarningsForRpc(*CHECK_NONFATAL(node.warnings), IsDeprecatedRPCEnabled("warnings"))); diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 1b4b770475..b3b7ad1157 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -389,10 +389,10 @@ static RPCHelpMan getrawtransaction() TxToJSON(*tx, hash_block, result, chainman.ActiveChainstate()); return result; } - if (!chainman.m_blockman.UndoReadFromDisk(blockUndo, *blockindex)) { + if (!chainman.m_blockman.ReadBlockUndo(blockUndo, *blockindex)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event."); } - if (!chainman.m_blockman.ReadBlockFromDisk(block, *blockindex)) { + if (!chainman.m_blockman.ReadBlock(block, *blockindex)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Block data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event."); } diff --git a/src/rpc/server_util.cpp b/src/rpc/server_util.cpp index 5f99ec85a0..dec8bd8ae3 100644 --- a/src/rpc/server_util.cpp +++ b/src/rpc/server_util.cpp @@ -4,10 +4,13 @@ #include +#include #include #include #include +#include #include +#include #include #include #include @@ -17,6 +20,7 @@ #include using node::NodeContext; +using node::UpdateTime; NodeContext& EnsureAnyNodeContext(const std::any& context) { @@ -144,3 +148,18 @@ AddrMan& EnsureAnyAddrman(const std::any& context) { return EnsureAddrman(EnsureAnyNodeContext(context)); } + +void NextEmptyBlockIndex(CBlockIndex& tip, const Consensus::Params& consensusParams, CBlockIndex& next_index) +{ + CBlockHeader next_header{}; + next_header.hashPrevBlock = tip.GetBlockHash(); + UpdateTime(&next_header, consensusParams, &tip); + next_header.nBits = GetNextWorkRequired(&tip, &next_header, consensusParams); + next_header.nNonce = 0; + + next_index.pprev = &tip; + next_index.nTime = next_header.nTime; + next_index.nBits = 
next_header.nBits; + next_index.nNonce = next_header.nNonce; + next_index.nHeight = tip.nHeight + 1; +} diff --git a/src/rpc/server_util.h b/src/rpc/server_util.h index 9a79603b70..1a6b7faa27 100644 --- a/src/rpc/server_util.h +++ b/src/rpc/server_util.h @@ -7,8 +7,11 @@ #include +#include + class AddrMan; class ArgsManager; +class CBlockIndex; class CBlockPolicyEstimator; class CConnman; class CTxMemPool; @@ -41,4 +44,7 @@ PeerManager& EnsurePeerman(const node::NodeContext& node); AddrMan& EnsureAddrman(const node::NodeContext& node); AddrMan& EnsureAnyAddrman(const std::any& context); +/** Return an empty block index on top of the tip, with height, time and nBits set */ +void NextEmptyBlockIndex(CBlockIndex& tip, const Consensus::Params& consensusParams, CBlockIndex& next_index); + #endif // BITCOIN_RPC_SERVER_UTIL_H diff --git a/src/rpc/txoutproof.cpp b/src/rpc/txoutproof.cpp index 40294fda06..77fd22000c 100644 --- a/src/rpc/txoutproof.cpp +++ b/src/rpc/txoutproof.cpp @@ -102,7 +102,7 @@ static RPCHelpMan gettxoutproof() CheckBlockDataAvailability(chainman.m_blockman, *pblockindex, /*check_for_undo=*/false); } CBlock block; - if (!chainman.m_blockman.ReadBlockFromDisk(block, *pblockindex)) { + if (!chainman.m_blockman.ReadBlock(block, *pblockindex)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Can't read block from disk"); } diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index b1fbc25641..2941dda8c0 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -4,6 +4,7 @@ #include // IWYU pragma: keep +#include #include #include #include @@ -13,6 +14,7 @@ #include #include #include +#include #include #include