From 733fa0b0a140fc1e40c644a29953db090baa2890 Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Tue, 26 Nov 2024 12:47:13 -0500 Subject: [PATCH 01/62] miner: never create a template which exploits the timewarp bug --- src/node/miner.cpp | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/node/miner.cpp b/src/node/miner.cpp index 790ee1c146..2f949f2884 100644 --- a/src/node/miner.cpp +++ b/src/node/miner.cpp @@ -33,12 +33,10 @@ int64_t UpdateTime(CBlockHeader* pblock, const Consensus::Params& consensusParam int64_t nOldTime = pblock->nTime; int64_t nNewTime{std::max(pindexPrev->GetMedianTimePast() + 1, TicksSinceEpoch(NodeClock::now()))}; - if (consensusParams.enforce_BIP94) { - // Height of block to be mined. - const int height{pindexPrev->nHeight + 1}; - if (height % consensusParams.DifficultyAdjustmentInterval() == 0) { - nNewTime = std::max(nNewTime, pindexPrev->GetBlockTime() - MAX_TIMEWARP); - } + // Height of block to be mined. + const int height{pindexPrev->nHeight + 1}; + if (height % consensusParams.DifficultyAdjustmentInterval() == 0) { + nNewTime = std::max(nNewTime, pindexPrev->GetBlockTime() - MAX_TIMEWARP); } if (nOldTime < nNewTime) { From fad83e759a4a6221fb3b067657f847894e5a2f20 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Wed, 4 Dec 2024 10:15:23 +0100 Subject: [PATCH 02/62] doc: Fix incorrect send RPC docs --- src/wallet/rpc/spend.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/wallet/rpc/spend.cpp b/src/wallet/rpc/spend.cpp index bea9b2eec1..ec0bf29c3b 100644 --- a/src/wallet/rpc/spend.cpp +++ b/src/wallet/rpc/spend.cpp @@ -1226,16 +1226,18 @@ RPCHelpMan send() {"include_watching", RPCArg::Type::BOOL, RPCArg::DefaultHint{"true for watch-only wallets, otherwise false"}, "Also select inputs which are watch only.\n" "Only solvable inputs can be used. Watch-only destinations are solvable if the public key and/or output script was imported,\n" "e.g. with 'importpubkey' or 'importmulti' with the 'pubkeys' or 'desc' field."}, - {"inputs", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Specify inputs instead of adding them automatically. A JSON array of JSON objects", + {"inputs", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Specify inputs instead of adding them automatically.", { + {"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "", { {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"}, {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The output number"}, - {"sequence", RPCArg::Type::NUM, RPCArg::Optional::NO, "The sequence number"}, + {"sequence", RPCArg::Type::NUM, RPCArg::DefaultHint{"depends on the value of the 'replaceable' and 'locktime' arguments"}, "The sequence number"}, {"weight", RPCArg::Type::NUM, RPCArg::DefaultHint{"Calculated from wallet and solving data"}, "The maximum weight for this input, " "including the weight of the outpoint and sequence number. " "Note that signature sizes are not guaranteed to be consistent, " "so the maximum DER signatures size of 73 bytes should be used when considering ECDSA signatures." "Remember to convert serialized sizes to weight units when necessary."}, + }}, }, }, {"locktime", RPCArg::Type::NUM, RPCArg::Default{0}, "Raw locktime. 
Non-0 value also locktime-activates inputs"}, From 18619b473255786b80f27dd33b46eb26a63fffc7 Mon Sep 17 00:00:00 2001 From: furszy Date: Wed, 6 Nov 2024 13:33:51 -0500 Subject: [PATCH 03/62] wallet: remove BDB dependency from wallet migration benchmark Stops creating a bdb database in the wallet migration benchmark. Instead, the benchmark now creates the db in memory and re-uses it for the migration process. --- src/bench/wallet_migration.cpp | 41 +++++++++++++++++----------------- src/wallet/wallet.cpp | 20 +++++++++++++---- src/wallet/wallet.h | 2 ++ 3 files changed, 38 insertions(+), 25 deletions(-) diff --git a/src/bench/wallet_migration.cpp b/src/bench/wallet_migration.cpp index eff6c6b526..524839e387 100644 --- a/src/bench/wallet_migration.cpp +++ b/src/bench/wallet_migration.cpp @@ -5,6 +5,7 @@ #include // IWYU pragma: keep #include +#include #include #include #include @@ -16,7 +17,7 @@ #include -#if defined(USE_BDB) && defined(USE_SQLITE) // only enable benchmark when bdb and sqlite are enabled +#if defined(USE_SQLITE) // only enable benchmark when sqlite is enabled namespace wallet{ @@ -32,41 +33,39 @@ static void WalletMigration(benchmark::Bench& bench) int NUM_WATCH_ONLY_ADDR = 20; // Setup legacy wallet - DatabaseOptions options; - options.use_unsafe_sync = true; - options.verify = false; - DatabaseStatus status; - bilingual_str error; - auto database = MakeWalletDatabase(fs::PathToString(test_setup->m_path_root / "legacy"), options, status, error); - uint64_t create_flags = 0; - auto wallet = TestLoadWallet(std::move(database), context, create_flags); + std::unique_ptr wallet = std::make_unique(test_setup->m_node.chain.get(), "", CreateMockableWalletDatabase()); + wallet->chainStateFlushed(ChainstateRole::NORMAL, CBlockLocator{}); + LegacyDataSPKM* legacy_spkm = wallet->GetOrCreateLegacyDataSPKM(); // Add watch-only addresses std::vector scripts_watch_only; for (int w = 0; w < NUM_WATCH_ONLY_ADDR; ++w) { CKey key = GenerateRandomKey(); LOCK(wallet->cs_wallet); - const CScript& script = scripts_watch_only.emplace_back(GetScriptForDestination(GetDestinationForKey(key.GetPubKey(), OutputType::LEGACY))); - bool res = wallet->ImportScriptPubKeys(strprintf("watch_%d", w), {script}, - /*have_solving_data=*/false, /*apply_label=*/true, /*timestamp=*/1); - assert(res); + const auto& dest = GetDestinationForKey(key.GetPubKey(), OutputType::LEGACY); + const CScript& script = scripts_watch_only.emplace_back(GetScriptForDestination(dest)); + assert(legacy_spkm->LoadWatchOnly(script)); + assert(wallet->SetAddressBook(dest, strprintf("watch_%d", w), /*purpose=*/std::nullopt)); } // Generate transactions and local addresses - for (int j = 0; j < 400; ++j) { + for (int j = 0; j < 500; ++j) { + CKey key = GenerateRandomKey(); + CPubKey pubkey = key.GetPubKey(); + // Load key, scripts and create address book record + Assert(legacy_spkm->LoadKey(key, pubkey)); + CTxDestination dest{PKHash(pubkey)}; + Assert(wallet->SetAddressBook(dest, strprintf("legacy_%d", j), /*purpose=*/std::nullopt)); + CMutableTransaction mtx; - mtx.vout.emplace_back(COIN, GetScriptForDestination(*Assert(wallet->GetNewDestination(OutputType::BECH32, strprintf("bench_%d", j))))); - mtx.vout.emplace_back(COIN, GetScriptForDestination(*Assert(wallet->GetNewDestination(OutputType::LEGACY, strprintf("legacy_%d", j))))); + mtx.vout.emplace_back(COIN, GetScriptForDestination(dest)); mtx.vout.emplace_back(COIN, scripts_watch_only.at(j % NUM_WATCH_ONLY_ADDR)); mtx.vin.resize(2); wallet->AddToWallet(MakeTransactionRef(mtx), 
TxStateInactive{}, /*update_wtx=*/nullptr, /*fFlushOnClose=*/false, /*rescanning_old_block=*/true); } - // Unload so the migration process loads it - TestUnloadWallet(std::move(wallet)); - - bench.epochs(/*numEpochs=*/1).run([&] { - util::Result res = MigrateLegacyToDescriptor(fs::PathToString(test_setup->m_path_root / "legacy"), "", context); + bench.epochs(/*numEpochs=*/1).run([&context, &wallet] { + util::Result res = MigrateLegacyToDescriptor(std::move(wallet), /*passphrase=*/"", context, /*was_loaded=*/false); assert(res); assert(res->wallet); assert(res->watchonly_wallet); diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 45b866d60d..ad372da368 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -4380,9 +4380,8 @@ bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error, util::Result MigrateLegacyToDescriptor(const std::string& wallet_name, const SecureString& passphrase, WalletContext& context) { - MigrationResult res; - bilingual_str error; std::vector warnings; + bilingual_str error; // If the wallet is still loaded, unload it so that nothing else tries to use it while we're changing it bool was_loaded = false; @@ -4431,10 +4430,23 @@ util::Result MigrateLegacyToDescriptor(const std::string& walle return util::Error{Untranslated("Wallet loading failed.") + Untranslated(" ") + error}; } + return MigrateLegacyToDescriptor(std::move(local_wallet), passphrase, context, was_loaded); +} + +util::Result MigrateLegacyToDescriptor(std::shared_ptr local_wallet, const SecureString& passphrase, WalletContext& context, bool was_loaded) +{ + MigrationResult res; + bilingual_str error; + std::vector warnings; + + DatabaseOptions options; + options.require_existing = true; + DatabaseStatus status; + + const std::string wallet_name = local_wallet->GetName(); + // Helper to reload as normal for some of our exit scenarios const auto& reload_wallet = [&](std::shared_ptr& to_reload) { - // Reset options.require_format as wallets of any format may be reloaded. - options.require_format = std::nullopt; assert(to_reload.use_count() == 1); std::string name = to_reload->GetName(); to_reload.reset(); diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index d869f031bb..36d6acba75 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -1134,6 +1134,8 @@ struct MigrationResult { //! Do all steps to migrate a legacy wallet to a descriptor wallet [[nodiscard]] util::Result MigrateLegacyToDescriptor(const std::string& wallet_name, const SecureString& passphrase, WalletContext& context); +//! Requirement: The wallet provided to this function must be isolated, with no attachment to the node's context. +[[nodiscard]] util::Result MigrateLegacyToDescriptor(std::shared_ptr local_wallet, const SecureString& passphrase, WalletContext& context, bool was_loaded); } // namespace wallet #endif // BITCOIN_WALLET_WALLET_H From d38ade7bc409207bf425e05ee10b633b0aef4c36 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Thu, 19 Dec 2024 12:53:48 +0000 Subject: [PATCH 04/62] qa: Use `sys.executable` when invoking other Python scripts This change fixes tests on systems where `python3` is not available in the `PATH`, causing the shebang `#!/usr/bin/env python3` to fail. 
--- test/functional/rpc_signer.py | 6 ++---- test/functional/wallet_signer.py | 17 ++++------------- 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/test/functional/rpc_signer.py b/test/functional/rpc_signer.py index 7dd16ea8fe..715520b150 100755 --- a/test/functional/rpc_signer.py +++ b/test/functional/rpc_signer.py @@ -9,6 +9,7 @@ """ import os import platform +import sys from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( @@ -20,10 +21,7 @@ class RPCSignerTest(BitcoinTestFramework): def mock_signer_path(self): path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'signer.py') - if platform.system() == "Windows": - return "py -3 " + path - else: - return path + return sys.executable + " " + path def set_test_params(self): self.num_nodes = 4 diff --git a/test/functional/wallet_signer.py b/test/functional/wallet_signer.py index 52b4c390b8..fe1c7953c2 100755 --- a/test/functional/wallet_signer.py +++ b/test/functional/wallet_signer.py @@ -8,7 +8,7 @@ See also rpc_signer.py for tests without wallet context. """ import os -import platform +import sys from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( @@ -24,24 +24,15 @@ def add_options(self, parser): def mock_signer_path(self): path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'signer.py') - if platform.system() == "Windows": - return "py -3 " + path - else: - return path + return sys.executable + " " + path def mock_invalid_signer_path(self): path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'invalid_signer.py') - if platform.system() == "Windows": - return "py -3 " + path - else: - return path + return sys.executable + " " + path def mock_multi_signers_path(self): path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mocks', 'multi_signers.py') - if platform.system() == "Windows": - return "py -3 " + path - else: - return path + return sys.executable + " " + path def set_test_params(self): self.num_nodes = 2 From 1b51616f2e3b58a1c63a19e6dba8e7e9c2aefdeb Mon Sep 17 00:00:00 2001 From: i-am-yuvi Date: Fri, 3 Jan 2025 19:09:03 +0530 Subject: [PATCH 05/62] test: improve rogue calls in mining functions --- test/functional/test_framework/test_framework.py | 8 ++++---- test/functional/test_framework/test_node.py | 12 ++++++------ 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 7e8c40cf16..c4ec3685c5 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -716,22 +716,22 @@ def no_op(self): pass def generate(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generate(*args, invalid_call=False, **kwargs) + blocks = generator.generate(*args, called_by_framework=True, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generateblock(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generateblock(*args, invalid_call=False, **kwargs) + blocks = generator.generateblock(*args, called_by_framework=True, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks def generatetoaddress(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generatetoaddress(*args, invalid_call=False, **kwargs) + blocks = generator.generatetoaddress(*args, called_by_framework=True, **kwargs) sync_fun() if sync_fun else 
self.sync_all() return blocks def generatetodescriptor(self, generator, *args, sync_fun=None, **kwargs): - blocks = generator.generatetodescriptor(*args, invalid_call=False, **kwargs) + blocks = generator.generatetodescriptor(*args, called_by_framework=True, **kwargs) sync_fun() if sync_fun else self.sync_all() return blocks diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index d896a421d4..d67b5bc7a5 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -352,16 +352,16 @@ def generate(self, nblocks, maxtries=1000000, **kwargs): self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`") return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs) - def generateblock(self, *args, invalid_call, **kwargs): - assert not invalid_call + def generateblock(self, *args, called_by_framework, **kwargs): + assert called_by_framework, "Direct call of this mining RPC is discouraged. Please use one of the self.generate* methods on the test framework, which sync the nodes to avoid intermittent test issues. You may use sync_fun=self.no_op to disable the sync explicitly." return self.__getattr__('generateblock')(*args, **kwargs) - def generatetoaddress(self, *args, invalid_call, **kwargs): - assert not invalid_call + def generatetoaddress(self, *args, called_by_framework, **kwargs): + assert called_by_framework, "Direct call of this mining RPC is discouraged. Please use one of the self.generate* methods on the test framework, which sync the nodes to avoid intermittent test issues. You may use sync_fun=self.no_op to disable the sync explicitly." return self.__getattr__('generatetoaddress')(*args, **kwargs) - def generatetodescriptor(self, *args, invalid_call, **kwargs): - assert not invalid_call + def generatetodescriptor(self, *args, called_by_framework, **kwargs): + assert called_by_framework, "Direct call of this mining RPC is discouraged. Please use one of the self.generate* methods on the test framework, which sync the nodes to avoid intermittent test issues. You may use sync_fun=self.no_op to disable the sync explicitly." return self.__getattr__('generatetodescriptor')(*args, **kwargs) def setmocktime(self, timestamp): From 092569e8580b7c2c13b6cc9d29bcb4c5e85bbb44 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Wed, 1 Jan 2025 23:17:44 -0500 Subject: [PATCH 06/62] descriptor: Try the other parity in ConstPubkeyProvider::GetPrivKey() GetPrivKey() needs the same handling of all keyids for xonly keys that ToPrivateString() does. Refactor that into GetPrivKey() and reuse it in ToPrivateString() to resolve this. 
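An x-only public key does not commit to the parity of the key's Y coordinate, so the
corresponding private key may be indexed under either of two key IDs. Roughly, the lookup
needed for the x-only case is the loop that previously lived only in ToPrivateString():

    CKey key;
    for (const auto& keyid : XOnlyPubKey(m_pubkey).GetKeyIDs()) {
        arg.GetKey(keyid, key);
        if (key.IsValid()) break;
    }

GetKeyByXOnly() is expected to perform this iteration internally, which is why a single
GetPrivKey() implementation can now serve both code paths.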
--- src/script/descriptor.cpp | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 2e1a30744e..687fc72d1b 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -312,15 +312,7 @@ class ConstPubkeyProvider final : public PubkeyProvider bool ToPrivateString(const SigningProvider& arg, std::string& ret) const override { CKey key; - if (m_xonly) { - for (const auto& keyid : XOnlyPubKey(m_pubkey).GetKeyIDs()) { - arg.GetKey(keyid, key); - if (key.IsValid()) break; - } - } else { - arg.GetKey(m_pubkey.GetID(), key); - } - if (!key.IsValid()) return false; + if (!GetPrivKey(/*pos=*/0, arg, key)) return false; ret = EncodeSecret(key); return true; } @@ -331,7 +323,8 @@ class ConstPubkeyProvider final : public PubkeyProvider } bool GetPrivKey(int pos, const SigningProvider& arg, CKey& key) const override { - return arg.GetKey(m_pubkey.GetID(), key); + return m_xonly ? arg.GetKeyByXOnly(XOnlyPubKey(m_pubkey), key) : + arg.GetKey(m_pubkey.GetID(), key); } std::optional GetRootPubKey() const override { From 4c50c21f6bfc1d88846be571055b481ab14b086f Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Wed, 1 Jan 2025 23:52:33 -0500 Subject: [PATCH 07/62] tests: Check ExpandPrivate matches for both parsed descriptors --- src/script/signingprovider.h | 2 ++ src/test/descriptor_tests.cpp | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/src/script/signingprovider.h b/src/script/signingprovider.h index efdfd9ee56..5b1da681f8 100644 --- a/src/script/signingprovider.h +++ b/src/script/signingprovider.h @@ -136,6 +136,8 @@ class TaprootBuilder std::vector>> GetTreeTuples() const; /** Returns true if there are any tapscripts */ bool HasScripts() const { return !m_branch.empty(); } + + bool operator==(const TaprootBuilder& other) const { return GetTreeTuples() == other.GetTreeTuples(); } }; /** Given a TaprootSpendData and the output key, reconstruct its script tree. 
diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp index 288ffc66eb..5d4bb2ef11 100644 --- a/src/test/descriptor_tests.cpp +++ b/src/test/descriptor_tests.cpp @@ -62,6 +62,15 @@ bool EqualDescriptor(std::string a, std::string b) return a == b; } +bool EqualSigningProviders(const FlatSigningProvider& a, const FlatSigningProvider& b) +{ + return a.scripts == b.scripts + && a.pubkeys == b.pubkeys + && a.origins == b.origins + && a.keys == b.keys + && a.tr_trees == b.tr_trees; +} + std::string UseHInsteadOfApostrophe(const std::string& desc) { std::string ret = desc; @@ -214,6 +223,15 @@ void DoCheck(std::string prv, std::string pub, const std::string& norm_pub, int BOOST_CHECK_MESSAGE(EqualDescriptor(prv, prv1), "Private ser: " + prv1 + " Private desc: " + prv); } BOOST_CHECK(!parse_pub->ToPrivateString(keys_pub, prv1)); + + // Check that both can ExpandPrivate and get the same SigningProviders + FlatSigningProvider priv_prov; + parse_priv->ExpandPrivate(0, keys_priv, priv_prov); + + FlatSigningProvider pub_prov; + parse_pub->ExpandPrivate(0, keys_priv, pub_prov); + + BOOST_CHECK_MESSAGE(EqualSigningProviders(priv_prov, pub_prov), "Private desc: " + prv + " Pub desc: " + pub); } // Check that private can produce the normalized descriptors From b4ac48090f259dbef567b49fa36a8bf192209710 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 6 Jan 2025 19:10:15 -0500 Subject: [PATCH 08/62] descriptor: Use InferXOnlyPubkey for miniscript XOnly pubkey from script --- src/script/descriptor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 687fc72d1b..499af47ee5 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -1709,7 +1709,7 @@ struct KeyParser { if (miniscript::IsTapscript(m_script_ctx) && end - begin == 32) { XOnlyPubKey pubkey; std::copy(begin, end, pubkey.begin()); - if (auto pubkey_provider = InferPubkey(pubkey.GetEvenCorrespondingCPubKey(), ParseContext(), *m_in)) { + if (auto pubkey_provider = InferXOnlyPubkey(pubkey, ParseContext(), *m_in)) { m_keys.emplace_back(); m_keys.back().push_back(std::move(pubkey_provider)); return key; From c0045e6cee06bc0029fb79b5a531aa1f2b817424 Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Mon, 6 Jan 2025 14:25:44 -0800 Subject: [PATCH 09/62] Add test for multipath miniscript expression --- src/test/descriptor_tests.cpp | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp index 5d4bb2ef11..1b4020e0a0 100644 --- a/src/test/descriptor_tests.cpp +++ b/src/test/descriptor_tests.cpp @@ -826,6 +826,31 @@ BOOST_AUTO_TEST_CASE(descriptor_test) {{0x80000000UL + 48, 0x80000000UL + 1, 0x80000000UL, 0x80000000UL + 2, 1, 0}, {0x80000000UL + 48, 0x80000000UL + 1, 0x80000000UL, 0x80000000UL + 2, 1, 1}, {0x80000000UL + 48, 0x80000000UL + 1, 0x80000000UL, 0x80000000UL + 2, 1, 2}}, } ); + CheckMultipath("tr(xprv9yYge4PS54XkYT9KiLfCRwc8Jeuz8DucxQGtuEecJZYhKNiqbPxYHTPzXtskmzWBqdqkRAGsghNmZzNsfU2wstaB3XjDQFPv567aQSSuPyo,l:pk(xprvA1ADjaN8H3HGnZSmt4VF7YdWoV9oNq8jhqhurxsrYycBAFK555cECoaY22KWt6BTRNLuvobW5VQTF89PN3iA485LAg7epazevPyjCa4xTzd/<2;3>))", + "tr(xpub6CY33ZvKuS63kwDnpNCCo5YrrgkUXgdUKdCVhd4Dru5gCB3z8wGnqFiUP98Za5pYSYF5KmvBHTY3Ra8FAJGggzBjuHS69WzN8gscPupuZwK,l:pk(xpub6E9a95u27Qqa13XEz62FUgaFMWzHnHrb54dWfMHU7K9A33eDccvUkbu1sHYoByHAgJdR326rWqn9pGZgZHz1afDprW5gGwS4gUX8Ri6aGPZ/<2;3>))", + { + 
"tr(xprv9yYge4PS54XkYT9KiLfCRwc8Jeuz8DucxQGtuEecJZYhKNiqbPxYHTPzXtskmzWBqdqkRAGsghNmZzNsfU2wstaB3XjDQFPv567aQSSuPyo,l:pk(xprvA1ADjaN8H3HGnZSmt4VF7YdWoV9oNq8jhqhurxsrYycBAFK555cECoaY22KWt6BTRNLuvobW5VQTF89PN3iA485LAg7epazevPyjCa4xTzd/2))", + "tr(xprv9yYge4PS54XkYT9KiLfCRwc8Jeuz8DucxQGtuEecJZYhKNiqbPxYHTPzXtskmzWBqdqkRAGsghNmZzNsfU2wstaB3XjDQFPv567aQSSuPyo,l:pk(xprvA1ADjaN8H3HGnZSmt4VF7YdWoV9oNq8jhqhurxsrYycBAFK555cECoaY22KWt6BTRNLuvobW5VQTF89PN3iA485LAg7epazevPyjCa4xTzd/3))", + }, + { + "tr(xpub6CY33ZvKuS63kwDnpNCCo5YrrgkUXgdUKdCVhd4Dru5gCB3z8wGnqFiUP98Za5pYSYF5KmvBHTY3Ra8FAJGggzBjuHS69WzN8gscPupuZwK,l:pk(xpub6E9a95u27Qqa13XEz62FUgaFMWzHnHrb54dWfMHU7K9A33eDccvUkbu1sHYoByHAgJdR326rWqn9pGZgZHz1afDprW5gGwS4gUX8Ri6aGPZ/2))", + "tr(xpub6CY33ZvKuS63kwDnpNCCo5YrrgkUXgdUKdCVhd4Dru5gCB3z8wGnqFiUP98Za5pYSYF5KmvBHTY3Ra8FAJGggzBjuHS69WzN8gscPupuZwK,l:pk(xpub6E9a95u27Qqa13XEz62FUgaFMWzHnHrb54dWfMHU7K9A33eDccvUkbu1sHYoByHAgJdR326rWqn9pGZgZHz1afDprW5gGwS4gUX8Ri6aGPZ/3))", + }, + { + "tr(xpub6CY33ZvKuS63kwDnpNCCo5YrrgkUXgdUKdCVhd4Dru5gCB3z8wGnqFiUP98Za5pYSYF5KmvBHTY3Ra8FAJGggzBjuHS69WzN8gscPupuZwK,l:pk(xpub6E9a95u27Qqa13XEz62FUgaFMWzHnHrb54dWfMHU7K9A33eDccvUkbu1sHYoByHAgJdR326rWqn9pGZgZHz1afDprW5gGwS4gUX8Ri6aGPZ/2))", + "tr(xpub6CY33ZvKuS63kwDnpNCCo5YrrgkUXgdUKdCVhd4Dru5gCB3z8wGnqFiUP98Za5pYSYF5KmvBHTY3Ra8FAJGggzBjuHS69WzN8gscPupuZwK,l:pk(xpub6E9a95u27Qqa13XEz62FUgaFMWzHnHrb54dWfMHU7K9A33eDccvUkbu1sHYoByHAgJdR326rWqn9pGZgZHz1afDprW5gGwS4gUX8Ri6aGPZ/3))", + }, + XONLY_KEYS, + { + {{"512094cb097990da64eebbad7b979b1326f3cbe356357abf4deb4c4ff80c7acbe902"}}, + {{"5120f091450b88c606f5cbc3f0cebe89e00bc5dd27f92e22f54da06439bc0c401f41"}}, + }, + OutputType::BECH32M, + { + {{2}, {}}, + {{3}, {}}, + } + ); CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/<0;1>/<2;3>)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/<0;1>/<2;3>)", "pkh(): Multiple multipath key path specifiers found"); CheckUnparsable("pkh([deadbeef/<0;1>]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/0)", "pkh([deadbeef/<0;1>]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/0)", "pkh(): Key path value \'<0;1>\' specifies multipath in a section where multipath is not allowed"); CheckUnparsable("tr(xprv9s21ZrQH143K2Zu2kTVKcQi9nKhfgJUkYqG73wXsHuhATm1wkt6kcSZeTYEw2PL7krZtJopEYDvBdYWdAai3n3TWUTCVfHvPHqTYJv7smYe/6/*,{pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/<1;2;3>/0/*),pk(xprv9s21ZrQH143K3jUwNHoqQNrtzJnJmx4Yup8NkNLdVQCymYbPbJXnPhwkfTfxZfptcs3rLAPUXS39oDLgrNKQGwbGsEmJJ8BU3RzQuvShEG4/0/0/<3;4>/*)})", "tr(xpub6B4sSbNr8XFYXqqKB7PeUemqgEaVtCLjgd5Lf2VYtezSHozC7ffCvVNCyu9TCgHntRQdimjV3tHbxmNfocxtuh6saNtZEw91gjXLRhQ3Yar/6/*,{pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/<1;2;3>/0/*),pk(xpub6AhFhZJJGt9YB8i85RfrJ8jT3T2FF5EejDCXqXfm1DAczFEXkk8HD3CXTg2TmKM8wTbSnSw3wPg5JuyLitUrpRmkjn2BQXyZnqJx16AGy94/0/0/<3;4>/*)})", "tr(): Multipath subscripts have mismatched lengths"); From f6e88931f071f06cb8aefeac61cf3bce662eea74 Mon Sep 17 00:00:00 2001 From: ismaelsadeeq Date: Sat, 22 Jun 2024 13:16:23 +0100 Subject: [PATCH 10/62] test: test that `create_self_transfer_multi` respects `target_vsize` --- test/functional/feature_framework_miniwallet.py | 7 +++++-- 1 
file changed, 5 insertions(+), 2 deletions(-) diff --git a/test/functional/feature_framework_miniwallet.py b/test/functional/feature_framework_miniwallet.py index f723f7f31e..e92ea1b339 100755 --- a/test/functional/feature_framework_miniwallet.py +++ b/test/functional/feature_framework_miniwallet.py @@ -29,8 +29,11 @@ def test_tx_padding(self): utxo = wallet.get_utxo(mark_as_spent=False) for target_vsize in [250, 500, 1250, 2500, 5000, 12500, 25000, 50000, 1000000, 248, 501, 1085, 3343, 5805, 12289, 25509, 55855, 999998]: - tx = wallet.create_self_transfer(utxo_to_spend=utxo, target_vsize=target_vsize)["tx"] - assert_equal(tx.get_vsize(), target_vsize) + tx = wallet.create_self_transfer(utxo_to_spend=utxo, target_vsize=target_vsize) + assert_equal(tx['tx'].get_vsize(), target_vsize) + child_tx = wallet.create_self_transfer_multi(utxos_to_spend=[tx["new_utxo"]], target_vsize=target_vsize) + assert_equal(child_tx['tx'].get_vsize(), target_vsize) + def test_wallet_tagging(self): """Verify that tagged wallet instances are able to send funds.""" From a8780c937f730f4fa2ef2045458e3908bb2176e4 Mon Sep 17 00:00:00 2001 From: ismaelsadeeq Date: Sat, 22 Jun 2024 14:13:54 +0100 Subject: [PATCH 11/62] test: raise an error if output value is <= 0 in `create_self_transfer` --- test/functional/test_framework/wallet.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py index 1cef714705..32045e2332 100644 --- a/test/functional/test_framework/wallet.py +++ b/test/functional/test_framework/wallet.py @@ -378,7 +378,8 @@ def create_self_transfer( if target_vsize and not fee: # respect fee_rate if target vsize is passed fee = get_fee(target_vsize, fee_rate) send_value = utxo_to_spend["value"] - (fee or (fee_rate * vsize / 1000)) - + if send_value <= 0: + raise RuntimeError(f"UTXO value {utxo_to_spend['value']} is too small to cover fees {(fee or (fee_rate * vsize / 1000))}") # create tx tx = self.create_self_transfer_multi( utxos_to_spend=[utxo_to_spend], From 92787dd52cd4ddc378cf1bcb7e92a649916a0f42 Mon Sep 17 00:00:00 2001 From: ismaelsadeeq Date: Tue, 1 Oct 2024 23:21:29 +0100 Subject: [PATCH 12/62] test: raise an error when target_vsize is below tx virtual size --- test/functional/test_framework/wallet.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py index 32045e2332..0a8c08474f 100644 --- a/test/functional/test_framework/wallet.py +++ b/test/functional/test_framework/wallet.py @@ -121,6 +121,9 @@ def _bulk_tx(self, tx, target_vsize): """Pad a transaction with extra outputs until it reaches a target vsize. 
returns the tx """ + if target_vsize < tx.get_vsize(): + raise RuntimeError(f"target_vsize {target_vsize} is less than transaction virtual size {tx.get_vsize()}") + tx.vout.append(CTxOut(nValue=0, scriptPubKey=CScript([OP_RETURN]))) # determine number of needed padding bytes dummy_vbytes = target_vsize - tx.get_vsize() From fa3c787b62af6abaac35a8f0d785becdb8871cc0 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Fri, 20 Dec 2024 16:15:32 +0100 Subject: [PATCH 13/62] fuzz: Abort when global PRNG is used before SeedRand::ZEROS --- src/test/fuzz/fuzz.cpp | 6 +++--- src/test/util/random.cpp | 8 +++++--- src/test/util/setup_common.cpp | 2 ++ 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp index e4e4723c74..6afb820bb2 100644 --- a/src/test/fuzz/fuzz.cpp +++ b/src/test/fuzz/fuzz.cpp @@ -79,7 +79,7 @@ void FuzzFrameworkRegisterTarget(std::string_view name, TypeTestOneInput target, static std::string_view g_fuzz_target; static const TypeTestOneInput* g_test_one_input{nullptr}; -inline void test_one_input(FuzzBufferType buffer) +static void test_one_input(FuzzBufferType buffer) { CheckGlobals check{}; (*Assert(g_test_one_input))(buffer); @@ -108,12 +108,12 @@ void ResetCoverageCounters() {} #endif -void initialize() +static void initialize() { // By default, make the RNG deterministic with a fixed seed. This will affect all // randomness during the fuzz test, except: // - GetStrongRandBytes(), which is used for the creation of private key material. - // - Creating a BasicTestingSetup or derived class will switch to a random seed. + // - Randomness obtained before this call in g_rng_temp_path_init SeedRandomStateForTest(SeedRand::ZEROS); // Terminate immediately if a fuzzing harness ever tries to create a socket. diff --git a/src/test/util/random.cpp b/src/test/util/random.cpp index 770848f708..d75f1ef8a6 100644 --- a/src/test/util/random.cpp +++ b/src/test/util/random.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2023 The Bitcoin Core developers +// Copyright (c) 2023-present The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -24,7 +24,8 @@ void SeedRandomStateForTest(SeedRand seedtype) // MakeRandDeterministicDANGEROUS is called, the output of GetRandHash is // no longer truly random. It should be enough to get the seed once for the // process. - static const uint256 ctx_seed = []() { + static const auto g_ctx_seed = []() -> std::optional { + if constexpr (G_FUZZING) return {}; // If RANDOM_CTX_SEED is set, use that as seed. if (const char* num{std::getenv(RANDOM_CTX_SEED)}) { if (auto num_parsed{uint256::FromUserHex(num)}) { @@ -41,8 +42,9 @@ void SeedRandomStateForTest(SeedRand seedtype) g_seeded_g_prng_zero = seedtype == SeedRand::ZEROS; if constexpr (G_FUZZING) { Assert(g_seeded_g_prng_zero); // Only SeedRandomStateForTest(SeedRand::ZEROS) is allowed in fuzz tests + Assert(!g_used_g_prng); // The global PRNG must not have been used before SeedRandomStateForTest(SeedRand::ZEROS) } - const uint256& seed{seedtype == SeedRand::FIXED_SEED ? ctx_seed : uint256::ZERO}; + const uint256& seed{seedtype == SeedRand::FIXED_SEED ? 
g_ctx_seed.value() : uint256::ZERO}; LogInfo("Setting random seed for current tests to %s=%s\n", RANDOM_CTX_SEED, seed.GetHex()); MakeRandDeterministicDANGEROUS(seed); } diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 3f8c6f41ba..c9ea4b77c0 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -79,7 +79,9 @@ constexpr inline auto TEST_DIR_PATH_ELEMENT{"test_common bitcoin"}; // Includes static FastRandomContext g_rng_temp_path; static const bool g_rng_temp_path_init{[] { // Must be initialized before any SeedRandomForTest + Assert(!g_used_g_prng); (void)g_rng_temp_path.rand64(); + g_used_g_prng = false; return true; }()}; From 34f9a0157aad7c10ac364b7e4602c5f74c1f9e20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Thu, 9 Jan 2025 12:41:44 +0100 Subject: [PATCH 14/62] refactor,bench: rename bench/readblock.cpp to bench/readwriteblock.cpp Done in separate commit to simplify review. Also renames benchmarks, since they're not strictly tests. Co-authored-by: Hodlinator <172445034+hodlinator@users.noreply.github.com> --- src/bench/CMakeLists.txt | 2 +- src/bench/{readblock.cpp => readwriteblock.cpp} | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) rename src/bench/{readblock.cpp => readwriteblock.cpp} (85%) diff --git a/src/bench/CMakeLists.txt b/src/bench/CMakeLists.txt index 4589ef177c..c55bbb1e05 100644 --- a/src/bench/CMakeLists.txt +++ b/src/bench/CMakeLists.txt @@ -44,7 +44,7 @@ add_executable(bench_bitcoin pool.cpp prevector.cpp random.cpp - readblock.cpp + readwriteblock.cpp rollingbloom.cpp rpc_blockchain.cpp rpc_mempool.cpp diff --git a/src/bench/readblock.cpp b/src/bench/readwriteblock.cpp similarity index 85% rename from src/bench/readblock.cpp rename to src/bench/readwriteblock.cpp index 058d953b4e..7fd9adc2c1 100644 --- a/src/bench/readblock.cpp +++ b/src/bench/readwriteblock.cpp @@ -28,7 +28,7 @@ static FlatFilePos WriteBlockToDisk(ChainstateManager& chainman) return chainman.m_blockman.SaveBlockToDisk(block, 0); } -static void ReadBlockFromDiskTest(benchmark::Bench& bench) +static void ReadBlockFromDiskBench(benchmark::Bench& bench) { const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; ChainstateManager& chainman{*testing_setup->m_node.chainman}; @@ -42,7 +42,7 @@ static void ReadBlockFromDiskTest(benchmark::Bench& bench) }); } -static void ReadRawBlockFromDiskTest(benchmark::Bench& bench) +static void ReadRawBlockFromDiskBench(benchmark::Bench& bench) { const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; ChainstateManager& chainman{*testing_setup->m_node.chainman}; @@ -56,5 +56,5 @@ static void ReadRawBlockFromDiskTest(benchmark::Bench& bench) }); } -BENCHMARK(ReadBlockFromDiskTest, benchmark::PriorityLevel::HIGH); -BENCHMARK(ReadRawBlockFromDiskTest, benchmark::PriorityLevel::HIGH); +BENCHMARK(ReadBlockFromDiskBench, benchmark::PriorityLevel::HIGH); +BENCHMARK(ReadRawBlockFromDiskBench, benchmark::PriorityLevel::HIGH); From 86b85bb11f8999eb59e34bd026b0791dc866f2eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Thu, 9 Jan 2025 12:42:01 +0100 Subject: [PATCH 15/62] bench: add SaveBlockBench --- src/bench/readwriteblock.cpp | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/src/bench/readwriteblock.cpp b/src/bench/readwriteblock.cpp index 7fd9adc2c1..e6d87c6ef0 100644 --- a/src/bench/readwriteblock.cpp +++ b/src/bench/readwriteblock.cpp @@ -19,25 +19,33 @@ #include #include -static FlatFilePos 
WriteBlockToDisk(ChainstateManager& chainman) +static CBlock CreateTestBlock() { DataStream stream{benchmark::data::block413567}; CBlock block; stream >> TX_WITH_WITNESS(block); + return block; +} - return chainman.m_blockman.SaveBlockToDisk(block, 0); +static void SaveBlockBench(benchmark::Bench& bench) +{ + const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; + auto& blockman{testing_setup->m_node.chainman->m_blockman}; + const CBlock block{CreateTestBlock()}; + bench.run([&] { + const auto pos{blockman.SaveBlockToDisk(block, 413'567)}; + assert(!pos.IsNull()); + }); } static void ReadBlockFromDiskBench(benchmark::Bench& bench) { const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; - ChainstateManager& chainman{*testing_setup->m_node.chainman}; - + auto& blockman{testing_setup->m_node.chainman->m_blockman}; + const auto pos{blockman.SaveBlockToDisk(CreateTestBlock(), 413'567)}; CBlock block; - const auto pos{WriteBlockToDisk(chainman)}; - bench.run([&] { - const auto success{chainman.m_blockman.ReadBlockFromDisk(block, pos)}; + const auto success{blockman.ReadBlockFromDisk(block, pos)}; assert(success); }); } @@ -45,16 +53,16 @@ static void ReadBlockFromDiskBench(benchmark::Bench& bench) static void ReadRawBlockFromDiskBench(benchmark::Bench& bench) { const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; - ChainstateManager& chainman{*testing_setup->m_node.chainman}; - + auto& blockman{testing_setup->m_node.chainman->m_blockman}; + const auto pos{blockman.SaveBlockToDisk(CreateTestBlock(), 413'567)}; std::vector block_data; - const auto pos{WriteBlockToDisk(chainman)}; - + blockman.ReadRawBlockFromDisk(block_data, pos); // warmup bench.run([&] { - const auto success{chainman.m_blockman.ReadRawBlockFromDisk(block_data, pos)}; + const auto success{blockman.ReadRawBlockFromDisk(block_data, pos)}; assert(success); }); } +BENCHMARK(SaveBlockBench, benchmark::PriorityLevel::HIGH); BENCHMARK(ReadBlockFromDiskBench, benchmark::PriorityLevel::HIGH); BENCHMARK(ReadRawBlockFromDiskBench, benchmark::PriorityLevel::HIGH); From 42bc4914658d9834a653bd1763aa8f0d54355480 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Wed, 18 Dec 2024 12:38:55 +0100 Subject: [PATCH 16/62] refactor,blocks: inline `UndoWriteToDisk` `UndoWriteToDisk` wasn't really extracting a meaningful subset of the `WriteUndoDataForBlock` functionality, it's tied closely to the only caller (needs the header size twice, recalculated undo serializes size, returns multiple branches, modifies parameter, needs documentation). The inlined code should only differ in these parts (modernization will be done in other commits): * renamed `_pos` to `pos` in `WriteUndoDataForBlock` to match the parameter name; * inlined `hashBlock` parameter usage into `hasher << block.pprev->GetBlockHash()`; * changed `return false` to `return FatalError`; * capitalize comment. 
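For reference, the undo record written to the rev file keeps its existing layout:

    message start (4 bytes) | undo size (4 bytes) | serialized CBlockUndo | checksum (32 bytes)

with the checksum computed over the previous block's hash followed by the undo data,
exactly as the removed helper did.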
Co-authored-by: Ryan Ofsky --- src/node/blockstorage.cpp | 62 +++++++++++++++++---------------------- src/node/blockstorage.h | 1 - 2 files changed, 27 insertions(+), 36 deletions(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 07878a5602..3ee6b5d210 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -669,33 +669,6 @@ CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n) return &m_blockfile_info.at(n); } -bool BlockManager::UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const -{ - // Open history file to append - AutoFile fileout{OpenUndoFile(pos)}; - if (fileout.IsNull()) { - LogError("%s: OpenUndoFile failed\n", __func__); - return false; - } - - // Write index header - unsigned int nSize = GetSerializeSize(blockundo); - fileout << GetParams().MessageStart() << nSize; - - // Write undo data - long fileOutPos = fileout.tell(); - pos.nPos = (unsigned int)fileOutPos; - fileout << blockundo; - - // calculate & write checksum - HashWriter hasher{}; - hasher << hashBlock; - hasher << blockundo; - fileout << hasher.GetHash(); - - return true; -} - bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& index) const { const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())}; @@ -992,33 +965,52 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid // Write undo information to disk if (block.GetUndoPos().IsNull()) { - FlatFilePos _pos; - if (!FindUndoPos(state, block.nFile, _pos, ::GetSerializeSize(blockundo) + 40)) { + FlatFilePos pos; + if (!FindUndoPos(state, block.nFile, pos, ::GetSerializeSize(blockundo) + 40)) { LogError("%s: FindUndoPos failed\n", __func__); return false; } - if (!UndoWriteToDisk(blockundo, _pos, block.pprev->GetBlockHash())) { + // Open history file to append + AutoFile fileout{OpenUndoFile(pos)}; + if (fileout.IsNull()) { + LogError("%s: OpenUndoFile failed\n", __func__); return FatalError(m_opts.notifications, state, _("Failed to write undo data.")); } + + // Write index header + unsigned int nSize = GetSerializeSize(blockundo); + fileout << GetParams().MessageStart() << nSize; + + // Write undo data + long fileOutPos = fileout.tell(); + pos.nPos = (unsigned int)fileOutPos; + fileout << blockundo; + + // Calculate & write checksum + HashWriter hasher{}; + hasher << block.pprev->GetBlockHash(); + hasher << blockundo; + fileout << hasher.GetHash(); + // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order) // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height // in the block file info as below; note that this does not catch the case where the undo writes are keeping up // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in // the FindNextBlockPos function - if (_pos.nFile < cursor.file_num && static_cast(block.nHeight) == m_blockfile_info[_pos.nFile].nHeightLast) { + if (pos.nFile < cursor.file_num && static_cast(block.nHeight) == m_blockfile_info[pos.nFile].nHeightLast) { // Do not propagate the return code, a failed flush here should not // be an indication for a failed write. If it were propagated here, // the caller would assume the undo data not to be written, when in // fact it is. Note though, that a failed flush might leave the data // file untrimmed. 
- if (!FlushUndoFile(_pos.nFile, true)) { - LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", _pos.nFile); + if (!FlushUndoFile(pos.nFile, true)) { + LogPrintLevel(BCLog::BLOCKSTORAGE, BCLog::Level::Warning, "Failed to flush undo file %05i\n", pos.nFile); } - } else if (_pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) { + } else if (pos.nFile == cursor.file_num && block.nHeight > cursor.undo_height) { cursor.undo_height = block.nHeight; } // update nUndoPos in block index - block.nUndoPos = _pos.nPos; + block.nUndoPos = pos.nPos; block.nStatus |= BLOCK_HAVE_UNDO; m_dirty_blockindex.insert(&block); } diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index ac6db0558d..339282227c 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -176,7 +176,6 @@ class BlockManager * (BLOCK_SERIALIZATION_HEADER_SIZE) */ bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const; - bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock) const; /* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */ void FindFilesToPruneManual( From dfb2f9d004860c95fc6f0d4a016a9c038d53a475 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Wed, 18 Dec 2024 12:44:31 +0100 Subject: [PATCH 17/62] refactor,blocks: inline `WriteBlockToDisk` Similarly, `WriteBlockToDisk` wasn't really extracting a meaningful subset of the `SaveBlockToDisk` functionality, it's tied closely to the only caller (needs the header size twice, recalculated block serializes size, returns multiple branches, mutates parameter). The inlined code should only differ in these parts (modernization will be done in other commits): * renamed `blockPos` to `pos` in `SaveBlockToDisk` to match the parameter name; * changed `return false` to `return FlatFilePos()`. Also removed remaining references to `SaveBlockToDisk`. 
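For reference, the block record written to the blk file keeps its existing layout:

    message start (4 bytes) | block size (4 bytes) | serialized CBlock (with witness)

and the returned FlatFilePos still points at the serialized block itself, i.e. just past
the 8-byte header accounted for by BLOCK_SERIALIZATION_HEADER_SIZE.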
Co-authored-by: Ryan Ofsky --- src/node/blockstorage.cpp | 43 ++++++++++++++++----------------------- src/node/blockstorage.h | 15 +++----------- 2 files changed, 21 insertions(+), 37 deletions(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 3ee6b5d210..93dd23a5c8 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -936,27 +936,6 @@ bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFileP return true; } -bool BlockManager::WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const -{ - // Open history file to append - AutoFile fileout{OpenBlockFile(pos)}; - if (fileout.IsNull()) { - LogError("%s: OpenBlockFile failed\n", __func__); - return false; - } - - // Write index header - unsigned int nSize = GetSerializeSize(TX_WITH_WITNESS(block)); - fileout << GetParams().MessageStart() << nSize; - - // Write block - long fileOutPos = fileout.tell(); - pos.nPos = (unsigned int)fileOutPos; - fileout << TX_WITH_WITNESS(block); - - return true; -} - bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) { AssertLockHeld(::cs_main); @@ -1117,16 +1096,30 @@ FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight) // Account for the 4 magic message start bytes + the 4 length bytes (8 bytes total, // defined as BLOCK_SERIALIZATION_HEADER_SIZE) nBlockSize += static_cast(BLOCK_SERIALIZATION_HEADER_SIZE); - FlatFilePos blockPos{FindNextBlockPos(nBlockSize, nHeight, block.GetBlockTime())}; - if (blockPos.IsNull()) { + FlatFilePos pos{FindNextBlockPos(nBlockSize, nHeight, block.GetBlockTime())}; + if (pos.IsNull()) { LogError("%s: FindNextBlockPos failed\n", __func__); return FlatFilePos(); } - if (!WriteBlockToDisk(block, blockPos)) { + + // Open history file to append + AutoFile fileout{OpenBlockFile(pos)}; + if (fileout.IsNull()) { + LogError("%s: OpenBlockFile failed\n", __func__); m_opts.notifications.fatalError(_("Failed to write block.")); return FlatFilePos(); } - return blockPos; + + // Write index header + unsigned int nSize = GetSerializeSize(TX_WITH_WITNESS(block)); + fileout << GetParams().MessageStart() << nSize; + + // Write block + long fileOutPos = fileout.tell(); + pos.nPos = (unsigned int)fileOutPos; + fileout << TX_WITH_WITNESS(block); + + return pos; } static auto InitBlocksdirXorKey(const BlockManager::Options& opts) diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 339282227c..545d65a803 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -74,7 +74,7 @@ static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB /** The maximum size of a blk?????.dat file (since 0.8) */ static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB -/** Size of header written by WriteBlockToDisk before a serialized CBlock */ +/** Size of header written by SaveBlockToDisk before a serialized CBlock */ static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE = std::tuple_size_v + sizeof(unsigned int); // Because validation code takes pointers to the map's CBlockIndex objects, if @@ -161,7 +161,7 @@ class BlockManager * blockfile info, and checks if there is enough disk space to save the block. * * The nAddSize argument passed to this function should include not just the size of the serialized CBlock, but also the size of - * separator fields which are written before it by WriteBlockToDisk (BLOCK_SERIALIZATION_HEADER_SIZE). + * separator fields (BLOCK_SERIALIZATION_HEADER_SIZE). 
*/ [[nodiscard]] FlatFilePos FindNextBlockPos(unsigned int nAddSize, unsigned int nHeight, uint64_t nTime); [[nodiscard]] bool FlushChainstateBlockFile(int tip_height); @@ -169,14 +169,6 @@ class BlockManager AutoFile OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false) const; - /** - * Write a block to disk. The pos argument passed to this function is modified by this call. Before this call, it should - * point to an unused file location where separator fields will be written, followed by the serialized CBlock data. - * After this call, it will point to the beginning of the serialized CBlock data, after the separator fields - * (BLOCK_SERIALIZATION_HEADER_SIZE) - */ - bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos) const; - /* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */ void FindFilesToPruneManual( std::set& setFilesToPrune, @@ -346,8 +338,7 @@ class BlockManager * * @param[in] block the block being processed * @param[in] nHeight the height of the block - * @param[in] pos the position of the serialized CBlock on disk. This is the position returned - * by WriteBlockToDisk pointing at the CBlock, not the separator fields before it + * @param[in] pos the position of the serialized CBlock on disk */ void UpdateBlockInfo(const CBlock& block, unsigned int nHeight, const FlatFilePos& pos); From fa39f27a0f8b8d14f6769d48f43999a3a1148e4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Wed, 18 Dec 2024 12:29:25 +0100 Subject: [PATCH 18/62] refactor,blocks: deduplicate block's serialized size calculations For consistency `UNDO_DATA_DISK_OVERHEAD` was also extracted to avoid the constant's ambiguity. Asserts were added to help with the review - they are removed in the next commit. 
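The extracted constants work out to:

    BLOCK_SERIALIZATION_HEADER_SIZE = 4 (message start) + 4 (length field)            = 8 bytes
    UNDO_DATA_DISK_OVERHEAD         = BLOCK_SERIALIZATION_HEADER_SIZE + 32 (checksum) = 40 bytes

so the named constant replaces the bare `+ 40` that was previously passed to FindUndoPos().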
Co-authored-by: Ryan Ofsky --- src/node/blockstorage.cpp | 32 +++++++++++++------------------- src/node/blockstorage.h | 7 +++++-- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 93dd23a5c8..b33b1c6258 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -945,7 +945,9 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid // Write undo information to disk if (block.GetUndoPos().IsNull()) { FlatFilePos pos; - if (!FindUndoPos(state, block.nFile, pos, ::GetSerializeSize(blockundo) + 40)) { + const unsigned int blockundo_size{static_cast(GetSerializeSize(blockundo))}; + static_assert(UNDO_DATA_DISK_OVERHEAD == 40); // TODO remove + if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) { LogError("%s: FindUndoPos failed\n", __func__); return false; } @@ -957,12 +959,11 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid } // Write index header - unsigned int nSize = GetSerializeSize(blockundo); - fileout << GetParams().MessageStart() << nSize; - + assert(blockundo_size == GetSerializeSize(blockundo)); // TODO remove + fileout << GetParams().MessageStart() << blockundo_size; // Write undo data - long fileOutPos = fileout.tell(); - pos.nPos = (unsigned int)fileOutPos; + pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE; + assert(pos.nPos == fileout.tell()); // TODO remove fileout << blockundo; // Calculate & write checksum @@ -1092,17 +1093,12 @@ bool BlockManager::ReadRawBlockFromDisk(std::vector& block, const FlatF FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight) { - unsigned int nBlockSize = ::GetSerializeSize(TX_WITH_WITNESS(block)); - // Account for the 4 magic message start bytes + the 4 length bytes (8 bytes total, - // defined as BLOCK_SERIALIZATION_HEADER_SIZE) - nBlockSize += static_cast(BLOCK_SERIALIZATION_HEADER_SIZE); - FlatFilePos pos{FindNextBlockPos(nBlockSize, nHeight, block.GetBlockTime())}; + const unsigned int block_size{static_cast(GetSerializeSize(TX_WITH_WITNESS(block)))}; + FlatFilePos pos{FindNextBlockPos(block_size + BLOCK_SERIALIZATION_HEADER_SIZE, nHeight, block.GetBlockTime())}; if (pos.IsNull()) { LogError("%s: FindNextBlockPos failed\n", __func__); return FlatFilePos(); } - - // Open history file to append AutoFile fileout{OpenBlockFile(pos)}; if (fileout.IsNull()) { LogError("%s: OpenBlockFile failed\n", __func__); @@ -1110,15 +1106,13 @@ FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight) return FlatFilePos(); } + assert(block_size == GetSerializeSize(TX_WITH_WITNESS(block))); // TODO remove // Write index header - unsigned int nSize = GetSerializeSize(TX_WITH_WITNESS(block)); - fileout << GetParams().MessageStart() << nSize; - + fileout << GetParams().MessageStart() << block_size; // Write block - long fileOutPos = fileout.tell(); - pos.nPos = (unsigned int)fileOutPos; + pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE; + assert(pos.nPos == fileout.tell()); // TODO remove fileout << TX_WITH_WITNESS(block); - return pos; } diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index 545d65a803..e7fc7c2665 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -74,8 +74,11 @@ static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB /** The maximum size of a blk?????.dat file (since 0.8) */ static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB -/** Size of header written by SaveBlockToDisk 
before a serialized CBlock */ -static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE = std::tuple_size_v + sizeof(unsigned int); +/** Size of header written by SaveBlockToDisk before a serialized CBlock (8 bytes) */ +static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE{std::tuple_size_v + sizeof(unsigned int)}; + +/** Total overhead when writing undo data: header (8 bytes) plus checksum (32 bytes) */ +static constexpr size_t UNDO_DATA_DISK_OVERHEAD{BLOCK_SERIALIZATION_HEADER_SIZE + uint256::size()}; // Because validation code takes pointers to the map's CBlockIndex objects, if // we ever switch to another associative container, we need to either use a From baaa3b284671ba28dbbcbb43851ea46175fd2b13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Wed, 18 Dec 2024 12:57:25 +0100 Subject: [PATCH 19/62] refactor,blocks: remove costly asserts and modernize affected logs When the behavior was changes in a previous commit (caching `GetSerializeSize` and avoiding `AutoFile.tell`), (static)asserts were added to make sure the behavior was kept - to make sure reviewers and CI validates it. We can safely remove them now. Logs were also slightly modernized since they were trivial to do. Co-authored-by: Anthony Towns Co-authored-by: Hodlinator <172445034+hodlinator@users.noreply.github.com> --- src/node/blockstorage.cpp | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index b33b1c6258..b31d77dad3 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -676,7 +676,7 @@ bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& in // Open history file to read AutoFile filein{OpenUndoFile(pos, true)}; if (filein.IsNull()) { - LogError("%s: OpenUndoFile failed for %s\n", __func__, pos.ToString()); + LogError("OpenUndoFile failed for %s", pos.ToString()); return false; } @@ -946,24 +946,21 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid if (block.GetUndoPos().IsNull()) { FlatFilePos pos; const unsigned int blockundo_size{static_cast(GetSerializeSize(blockundo))}; - static_assert(UNDO_DATA_DISK_OVERHEAD == 40); // TODO remove if (!FindUndoPos(state, block.nFile, pos, blockundo_size + UNDO_DATA_DISK_OVERHEAD)) { - LogError("%s: FindUndoPos failed\n", __func__); + LogError("FindUndoPos failed"); return false; } // Open history file to append AutoFile fileout{OpenUndoFile(pos)}; if (fileout.IsNull()) { - LogError("%s: OpenUndoFile failed\n", __func__); + LogError("OpenUndoFile failed"); return FatalError(m_opts.notifications, state, _("Failed to write undo data.")); } // Write index header - assert(blockundo_size == GetSerializeSize(blockundo)); // TODO remove fileout << GetParams().MessageStart() << blockundo_size; // Write undo data pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE; - assert(pos.nPos == fileout.tell()); // TODO remove fileout << blockundo; // Calculate & write checksum @@ -1096,22 +1093,20 @@ FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight) const unsigned int block_size{static_cast(GetSerializeSize(TX_WITH_WITNESS(block)))}; FlatFilePos pos{FindNextBlockPos(block_size + BLOCK_SERIALIZATION_HEADER_SIZE, nHeight, block.GetBlockTime())}; if (pos.IsNull()) { - LogError("%s: FindNextBlockPos failed\n", __func__); + LogError("FindNextBlockPos failed"); return FlatFilePos(); } AutoFile fileout{OpenBlockFile(pos)}; if (fileout.IsNull()) { - LogError("%s: OpenBlockFile failed\n", __func__); + 
LogError("OpenBlockFile failed"); m_opts.notifications.fatalError(_("Failed to write block.")); return FlatFilePos(); } - assert(block_size == GetSerializeSize(TX_WITH_WITNESS(block))); // TODO remove // Write index header fileout << GetParams().MessageStart() << block_size; // Write block pos.nPos += BLOCK_SERIALIZATION_HEADER_SIZE; - assert(pos.nPos == fileout.tell()); // TODO remove fileout << TX_WITH_WITNESS(block); return pos; } From 223081ece651dc616ff63d9ac447eedc5c2a28fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?L=C5=91rinc?= Date: Thu, 9 Jan 2025 13:31:19 +0100 Subject: [PATCH 20/62] scripted-diff: rename block and undo functions for consistency Co-authored-by: Ryan Ofsky Co-authored-by: Hodlinator <172445034+hodlinator@users.noreply.github.com> -BEGIN VERIFY SCRIPT- grep -r -wE 'WriteBlock|ReadRawBlock|ReadBlock|WriteBlockUndo|ReadBlockUndo' $(git ls-files src/ ':!src/leveldb') && \ echo "Error: One or more target names already exist!" && exit 1 sed -i \ -e 's/\bSaveBlockToDisk/WriteBlock/g' \ -e 's/\bReadRawBlockFromDisk/ReadRawBlock/g' \ -e 's/\bReadBlockFromDisk/ReadBlock/g' \ -e 's/\bWriteUndoDataForBlock/WriteBlockUndo/g' \ -e 's/\bUndoReadFromDisk/ReadBlockUndo/g' \ $(git ls-files src/ ':!src/leveldb') -END VERIFY SCRIPT- --- src/bench/readwriteblock.cpp | 20 +++++++-------- src/index/base.cpp | 4 +-- src/index/blockfilterindex.cpp | 2 +- src/index/coinstatsindex.cpp | 6 ++--- src/init.cpp | 2 +- src/net_processing.cpp | 8 +++--- src/node/blockstorage.cpp | 14 +++++------ src/node/blockstorage.h | 14 +++++------ src/node/interfaces.cpp | 2 +- src/node/transaction.cpp | 2 +- src/rest.cpp | 2 +- src/rpc/blockchain.cpp | 8 +++--- src/rpc/rawtransaction.cpp | 4 +-- src/rpc/txoutproof.cpp | 2 +- src/test/blockmanager_tests.cpp | 18 ++++++------- src/test/util/blockfilter.cpp | 4 +-- src/test/validation_chainstate_tests.cpp | 2 +- src/validation.cpp | 32 ++++++++++++------------ 18 files changed, 73 insertions(+), 73 deletions(-) diff --git a/src/bench/readwriteblock.cpp b/src/bench/readwriteblock.cpp index e6d87c6ef0..cdf86185ae 100644 --- a/src/bench/readwriteblock.cpp +++ b/src/bench/readwriteblock.cpp @@ -33,36 +33,36 @@ static void SaveBlockBench(benchmark::Bench& bench) auto& blockman{testing_setup->m_node.chainman->m_blockman}; const CBlock block{CreateTestBlock()}; bench.run([&] { - const auto pos{blockman.SaveBlockToDisk(block, 413'567)}; + const auto pos{blockman.WriteBlock(block, 413'567)}; assert(!pos.IsNull()); }); } -static void ReadBlockFromDiskBench(benchmark::Bench& bench) +static void ReadBlockBench(benchmark::Bench& bench) { const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; auto& blockman{testing_setup->m_node.chainman->m_blockman}; - const auto pos{blockman.SaveBlockToDisk(CreateTestBlock(), 413'567)}; + const auto pos{blockman.WriteBlock(CreateTestBlock(), 413'567)}; CBlock block; bench.run([&] { - const auto success{blockman.ReadBlockFromDisk(block, pos)}; + const auto success{blockman.ReadBlock(block, pos)}; assert(success); }); } -static void ReadRawBlockFromDiskBench(benchmark::Bench& bench) +static void ReadRawBlockBench(benchmark::Bench& bench) { const auto testing_setup{MakeNoLogFileContext(ChainType::MAIN)}; auto& blockman{testing_setup->m_node.chainman->m_blockman}; - const auto pos{blockman.SaveBlockToDisk(CreateTestBlock(), 413'567)}; + const auto pos{blockman.WriteBlock(CreateTestBlock(), 413'567)}; std::vector block_data; - blockman.ReadRawBlockFromDisk(block_data, pos); // warmup + blockman.ReadRawBlock(block_data, pos); // warmup 
bench.run([&] { - const auto success{blockman.ReadRawBlockFromDisk(block_data, pos)}; + const auto success{blockman.ReadRawBlock(block_data, pos)}; assert(success); }); } BENCHMARK(SaveBlockBench, benchmark::PriorityLevel::HIGH); -BENCHMARK(ReadBlockFromDiskBench, benchmark::PriorityLevel::HIGH); -BENCHMARK(ReadRawBlockFromDiskBench, benchmark::PriorityLevel::HIGH); +BENCHMARK(ReadBlockBench, benchmark::PriorityLevel::HIGH); +BENCHMARK(ReadRawBlockBench, benchmark::PriorityLevel::HIGH); diff --git a/src/index/base.cpp b/src/index/base.cpp index a8f9073d9f..1169a1c86b 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -188,7 +188,7 @@ void BaseIndex::Sync() CBlock block; interfaces::BlockInfo block_info = kernel::MakeBlockInfo(pindex); - if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *pindex)) { + if (!m_chainstate->m_blockman.ReadBlock(block, *pindex)) { FatalErrorf("%s: Failed to read block %s from disk", __func__, pindex->GetBlockHash().ToString()); return; @@ -256,7 +256,7 @@ bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_ti // In the case of a reorg, ensure persisted block locator is not stale. // Pruning has a minimum of 288 blocks-to-keep and getting the index // out of sync may be possible but a users fault. - // In case we reorg beyond the pruned depth, ReadBlockFromDisk would + // In case we reorg beyond the pruned depth, ReadBlock would // throw and lead to a graceful shutdown SetBestBlockIndex(new_tip); if (!Commit()) { diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp index a808cc9085..5ce85e1f84 100644 --- a/src/index/blockfilterindex.cpp +++ b/src/index/blockfilterindex.cpp @@ -256,7 +256,7 @@ bool BlockFilterIndex::CustomAppend(const interfaces::BlockInfo& block) // pindex variable gives indexing code access to node internals. It // will be removed in upcoming commit const CBlockIndex* pindex = WITH_LOCK(cs_main, return m_chainstate->m_blockman.LookupBlockIndex(block.hash)); - if (!m_chainstate->m_blockman.UndoReadFromDisk(block_undo, *pindex)) { + if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) { return false; } } diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp index c950a18f3f..b5869416b9 100644 --- a/src/index/coinstatsindex.cpp +++ b/src/index/coinstatsindex.cpp @@ -123,7 +123,7 @@ bool CoinStatsIndex::CustomAppend(const interfaces::BlockInfo& block) // pindex variable gives indexing code access to node internals. 
It // will be removed in upcoming commit const CBlockIndex* pindex = WITH_LOCK(cs_main, return m_chainstate->m_blockman.LookupBlockIndex(block.hash)); - if (!m_chainstate->m_blockman.UndoReadFromDisk(block_undo, *pindex)) { + if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) { return false; } @@ -287,7 +287,7 @@ bool CoinStatsIndex::CustomRewind(const interfaces::BlockRef& current_tip, const do { CBlock block; - if (!m_chainstate->m_blockman.ReadBlockFromDisk(block, *iter_tip)) { + if (!m_chainstate->m_blockman.ReadBlock(block, *iter_tip)) { LogError("%s: Failed to read block %s from disk\n", __func__, iter_tip->GetBlockHash().ToString()); return false; @@ -415,7 +415,7 @@ bool CoinStatsIndex::ReverseBlock(const CBlock& block, const CBlockIndex* pindex // Ignore genesis block if (pindex->nHeight > 0) { - if (!m_chainstate->m_blockman.UndoReadFromDisk(block_undo, *pindex)) { + if (!m_chainstate->m_blockman.ReadBlockUndo(block_undo, *pindex)) { return false; } diff --git a/src/init.cpp b/src/init.cpp index b5adcf6476..190e4f15bf 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1588,7 +1588,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) g_zmq_notification_interface = CZMQNotificationInterface::Create( [&chainman = node.chainman](std::vector& block, const CBlockIndex& index) { assert(chainman); - return chainman->m_blockman.ReadRawBlockFromDisk(block, WITH_LOCK(cs_main, return index.GetBlockPos())); + return chainman->m_blockman.ReadRawBlock(block, WITH_LOCK(cs_main, return index.GetBlockPos())); }); if (g_zmq_notification_interface) { diff --git a/src/net_processing.cpp b/src/net_processing.cpp index a19443c0f5..6a8d40dae2 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -2268,7 +2268,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& // Fast-path: in this case it is possible to serve the block directly from disk, // as the network format matches the format on disk std::vector block_data; - if (!m_chainman.m_blockman.ReadRawBlockFromDisk(block_data, block_pos)) { + if (!m_chainman.m_blockman.ReadRawBlock(block_data, block_pos)) { if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) { LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs)); } else { @@ -2282,7 +2282,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv& } else { // Send block from disk std::shared_ptr pblockRead = std::make_shared(); - if (!m_chainman.m_blockman.ReadBlockFromDisk(*pblockRead, block_pos)) { + if (!m_chainman.m_blockman.ReadBlock(*pblockRead, block_pos)) { if (WITH_LOCK(m_chainman.GetMutex(), return m_chainman.m_blockman.IsBlockPruned(*pindex))) { LogDebug(BCLog::NET, "Block was pruned before it could be read, %s\n", pfrom.DisconnectMsg(fLogIPs)); } else { @@ -4096,7 +4096,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (!block_pos.IsNull()) { CBlock block; - const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, block_pos)}; + const bool ret{m_chainman.m_blockman.ReadBlock(block, block_pos)}; // If height is above MAX_BLOCKTXN_DEPTH then this block cannot get // pruned after we release cs_main above, so this read should never fail. 
assert(ret); @@ -5599,7 +5599,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) PushMessage(*pto, std::move(cached_cmpctblock_msg.value())); } else { CBlock block; - const bool ret{m_chainman.m_blockman.ReadBlockFromDisk(block, *pBestIndex)}; + const bool ret{m_chainman.m_blockman.ReadBlock(block, *pBestIndex)}; assert(ret); CBlockHeaderAndShortTxIDs cmpctblock{block, m_rng.rand64()}; MakeAndPushMessage(*pto, NetMsgType::CMPCTBLOCK, cmpctblock); diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index b31d77dad3..57c82e5d5f 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -669,7 +669,7 @@ CBlockFileInfo* BlockManager::GetBlockFileInfo(size_t n) return &m_blockfile_info.at(n); } -bool BlockManager::UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& index) const +bool BlockManager::ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const { const FlatFilePos pos{WITH_LOCK(::cs_main, return index.GetUndoPos())}; @@ -936,7 +936,7 @@ bool BlockManager::FindUndoPos(BlockValidationState& state, int nFile, FlatFileP return true; } -bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) +bool BlockManager::WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) { AssertLockHeld(::cs_main); const BlockfileType type = BlockfileTypeForHeight(block.nHeight); @@ -995,7 +995,7 @@ bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValid return true; } -bool BlockManager::ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) const +bool BlockManager::ReadBlock(CBlock& block, const FlatFilePos& pos) const { block.SetNull(); @@ -1029,11 +1029,11 @@ bool BlockManager::ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) cons return true; } -bool BlockManager::ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) const +bool BlockManager::ReadBlock(CBlock& block, const CBlockIndex& index) const { const FlatFilePos block_pos{WITH_LOCK(cs_main, return index.GetBlockPos())}; - if (!ReadBlockFromDisk(block, block_pos)) { + if (!ReadBlock(block, block_pos)) { return false; } if (block.GetHash() != index.GetBlockHash()) { @@ -1043,7 +1043,7 @@ bool BlockManager::ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) co return true; } -bool BlockManager::ReadRawBlockFromDisk(std::vector& block, const FlatFilePos& pos) const +bool BlockManager::ReadRawBlock(std::vector& block, const FlatFilePos& pos) const { FlatFilePos hpos = pos; // If nPos is less than 8 the pos is null and we don't have the block data @@ -1088,7 +1088,7 @@ bool BlockManager::ReadRawBlockFromDisk(std::vector& block, const FlatF return true; } -FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight) +FlatFilePos BlockManager::WriteBlock(const CBlock& block, int nHeight) { const unsigned int block_size{static_cast(GetSerializeSize(TX_WITH_WITNESS(block)))}; FlatFilePos pos{FindNextBlockPos(block_size + BLOCK_SERIALIZATION_HEADER_SIZE, nHeight, block.GetBlockTime())}; diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index e7fc7c2665..8a34efadfe 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -74,7 +74,7 @@ static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB /** The maximum size of a blk?????.dat file (since 0.8) */ static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB -/** Size of header written by SaveBlockToDisk before a serialized CBlock (8 bytes) */ +/** Size of 
header written by WriteBlock before a serialized CBlock (8 bytes) */ static constexpr size_t BLOCK_SERIALIZATION_HEADER_SIZE{std::tuple_size_v + sizeof(unsigned int)}; /** Total overhead when writing undo data: header (8 bytes) plus checksum (32 bytes) */ @@ -324,7 +324,7 @@ class BlockManager /** Get block file info entry for one block file */ CBlockFileInfo* GetBlockFileInfo(size_t n); - bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) + bool WriteBlockUndo(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex& block) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); /** Store block on disk and update block file statistics. @@ -335,7 +335,7 @@ class BlockManager * @returns in case of success, the position to which the block was written to * in case of an error, an empty FlatFilePos */ - FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight); + FlatFilePos WriteBlock(const CBlock& block, int nHeight); /** Update blockfile info while processing a block during reindex. The block must be available on disk. * @@ -414,11 +414,11 @@ class BlockManager void UnlinkPrunedFiles(const std::set& setFilesToPrune) const; /** Functions for disk access for blocks */ - bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos) const; - bool ReadBlockFromDisk(CBlock& block, const CBlockIndex& index) const; - bool ReadRawBlockFromDisk(std::vector& block, const FlatFilePos& pos) const; + bool ReadBlock(CBlock& block, const FlatFilePos& pos) const; + bool ReadBlock(CBlock& block, const CBlockIndex& index) const; + bool ReadRawBlock(std::vector& block, const FlatFilePos& pos) const; - bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex& index) const; + bool ReadBlockUndo(CBlockUndo& blockundo, const CBlockIndex& index) const; void CleanupBlockRevFiles() const; }; diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index f3b8c6a072..fe88a6dad1 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -442,7 +442,7 @@ bool FillBlock(const CBlockIndex* index, const FoundBlock& block, UniqueLocknHeight] == index ? 
active[index->nHeight + 1] : nullptr, *block.m_next_block, lock, active, blockman); if (block.m_data) { REVERSE_LOCK(lock); - if (!blockman.ReadBlockFromDisk(*block.m_data, *index)) block.m_data->SetNull(); + if (!blockman.ReadBlock(*block.m_data, *index)) block.m_data->SetNull(); } block.found = true; return true; diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp index 0f45da45db..666597391e 100644 --- a/src/node/transaction.cpp +++ b/src/node/transaction.cpp @@ -144,7 +144,7 @@ CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMe } if (block_index) { CBlock block; - if (blockman.ReadBlockFromDisk(block, *block_index)) { + if (blockman.ReadBlock(block, *block_index)) { for (const auto& tx : block.vtx) { if (tx->GetHash() == hash) { hashBlock = block_index->GetBlockHash(); diff --git a/src/rest.cpp b/src/rest.cpp index 3e4b8b37c6..9214efcd38 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -319,7 +319,7 @@ static bool rest_block(const std::any& context, } std::vector block_data{}; - if (!chainman.m_blockman.ReadRawBlockFromDisk(block_data, pos)) { + if (!chainman.m_blockman.ReadRawBlock(block_data, pos)) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 823d2303c8..266113f7e3 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -196,7 +196,7 @@ UniValue blockToJSON(BlockManager& blockman, const CBlock& block, const CBlockIn CBlockUndo blockUndo; const bool is_not_pruned{WITH_LOCK(::cs_main, return !blockman.IsBlockPruned(blockindex))}; bool have_undo{is_not_pruned && WITH_LOCK(::cs_main, return blockindex.nStatus & BLOCK_HAVE_UNDO)}; - if (have_undo && !blockman.UndoReadFromDisk(blockUndo, blockindex)) { + if (have_undo && !blockman.ReadBlockUndo(blockUndo, blockindex)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event."); } for (size_t i = 0; i < block.vtx.size(); ++i) { @@ -624,7 +624,7 @@ static CBlock GetBlockChecked(BlockManager& blockman, const CBlockIndex& blockin CheckBlockDataAvailability(blockman, blockindex, /*check_for_undo=*/false); } - if (!blockman.ReadBlockFromDisk(block, blockindex)) { + if (!blockman.ReadBlock(block, blockindex)) { // Block not found on disk. This shouldn't normally happen unless the block was // pruned right after we released the lock above. throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk"); @@ -643,7 +643,7 @@ static std::vector GetRawBlockChecked(BlockManager& blockman, const CBl pos = blockindex.GetBlockPos(); } - if (!blockman.ReadRawBlockFromDisk(data, pos)) { + if (!blockman.ReadRawBlock(data, pos)) { // Block not found on disk. This shouldn't normally happen unless the block was // pruned right after we released the lock above. 
throw JSONRPCError(RPC_MISC_ERROR, "Block not found on disk"); @@ -664,7 +664,7 @@ static CBlockUndo GetUndoChecked(BlockManager& blockman, const CBlockIndex& bloc CheckBlockDataAvailability(blockman, blockindex, /*check_for_undo=*/true); } - if (!blockman.UndoReadFromDisk(blockUndo, blockindex)) { + if (!blockman.ReadBlockUndo(blockUndo, blockindex)) { throw JSONRPCError(RPC_MISC_ERROR, "Can't read undo data from disk"); } diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 184fff9386..421656152c 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -390,10 +390,10 @@ static RPCHelpMan getrawtransaction() TxToJSON(*tx, hash_block, result, chainman.ActiveChainstate()); return result; } - if (!chainman.m_blockman.UndoReadFromDisk(blockUndo, *blockindex)) { + if (!chainman.m_blockman.ReadBlockUndo(blockUndo, *blockindex)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Undo data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event."); } - if (!chainman.m_blockman.ReadBlockFromDisk(block, *blockindex)) { + if (!chainman.m_blockman.ReadBlock(block, *blockindex)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Block data expected but can't be read. This could be due to disk corruption or a conflict with a pruning event."); } diff --git a/src/rpc/txoutproof.cpp b/src/rpc/txoutproof.cpp index 40294fda06..77fd22000c 100644 --- a/src/rpc/txoutproof.cpp +++ b/src/rpc/txoutproof.cpp @@ -102,7 +102,7 @@ static RPCHelpMan gettxoutproof() CheckBlockDataAvailability(chainman.m_blockman, *pblockindex, /*check_for_undo=*/false); } CBlock block; - if (!chainman.m_blockman.ReadBlockFromDisk(block, *pblockindex)) { + if (!chainman.m_blockman.ReadBlock(block, *pblockindex)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Can't read block from disk"); } diff --git a/src/test/blockmanager_tests.cpp b/src/test/blockmanager_tests.cpp index c2b95dd861..5540e28fb7 100644 --- a/src/test/blockmanager_tests.cpp +++ b/src/test/blockmanager_tests.cpp @@ -36,7 +36,7 @@ BOOST_AUTO_TEST_CASE(blockmanager_find_block_pos) }; BlockManager blockman{*Assert(m_node.shutdown_signal), blockman_opts}; // simulate adding a genesis block normally - BOOST_CHECK_EQUAL(blockman.SaveBlockToDisk(params->GenesisBlock(), 0).nPos, BLOCK_SERIALIZATION_HEADER_SIZE); + BOOST_CHECK_EQUAL(blockman.WriteBlock(params->GenesisBlock(), 0).nPos, BLOCK_SERIALIZATION_HEADER_SIZE); // simulate what happens during reindex // simulate a well-formed genesis block being found at offset 8 in the blk00000.dat file // the block is found at offset 8 because there is an 8 byte serialization header @@ -49,7 +49,7 @@ BOOST_AUTO_TEST_CASE(blockmanager_find_block_pos) // this is a check to make sure that https://github.com/bitcoin/bitcoin/issues/21379 does not recur // 8 bytes (for serialization header) + 285 (for serialized genesis block) = 293 // add another 8 bytes for the second block's serialization header and we get 293 + 8 = 301 - FlatFilePos actual{blockman.SaveBlockToDisk(params->GenesisBlock(), 1)}; + FlatFilePos actual{blockman.WriteBlock(params->GenesisBlock(), 1)}; BOOST_CHECK_EQUAL(actual.nPos, BLOCK_SERIALIZATION_HEADER_SIZE + ::GetSerializeSize(TX_WITH_WITNESS(params->GenesisBlock())) + BLOCK_SERIALIZATION_HEADER_SIZE); } @@ -158,10 +158,10 @@ BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file) BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), 0); // Write the first block to a new location. 
- FlatFilePos pos1{blockman.SaveBlockToDisk(block1, /*nHeight=*/1)}; + FlatFilePos pos1{blockman.WriteBlock(block1, /*nHeight=*/1)}; // Write second block - FlatFilePos pos2{blockman.SaveBlockToDisk(block2, /*nHeight=*/2)}; + FlatFilePos pos2{blockman.WriteBlock(block2, /*nHeight=*/2)}; // Two blocks in the file BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), (TEST_BLOCK_SIZE + BLOCK_SERIALIZATION_HEADER_SIZE) * 2); @@ -171,13 +171,13 @@ BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file) CBlock read_block; BOOST_CHECK_EQUAL(read_block.nVersion, 0); { - ASSERT_DEBUG_LOG("ReadBlockFromDisk: Errors in block header"); - BOOST_CHECK(!blockman.ReadBlockFromDisk(read_block, pos1)); + ASSERT_DEBUG_LOG("ReadBlock: Errors in block header"); + BOOST_CHECK(!blockman.ReadBlock(read_block, pos1)); BOOST_CHECK_EQUAL(read_block.nVersion, 1); } { - ASSERT_DEBUG_LOG("ReadBlockFromDisk: Errors in block header"); - BOOST_CHECK(!blockman.ReadBlockFromDisk(read_block, pos2)); + ASSERT_DEBUG_LOG("ReadBlock: Errors in block header"); + BOOST_CHECK(!blockman.ReadBlock(read_block, pos2)); BOOST_CHECK_EQUAL(read_block.nVersion, 2); } @@ -194,7 +194,7 @@ BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file) BOOST_CHECK_EQUAL(blockman.CalculateCurrentUsage(), (TEST_BLOCK_SIZE + BLOCK_SERIALIZATION_HEADER_SIZE) * 2); // Block 2 was not overwritten: - blockman.ReadBlockFromDisk(read_block, pos2); + blockman.ReadBlock(read_block, pos2); BOOST_CHECK_EQUAL(read_block.nVersion, 2); } diff --git a/src/test/util/blockfilter.cpp b/src/test/util/blockfilter.cpp index 19f3d51d5e..8a17f7f4f5 100644 --- a/src/test/util/blockfilter.cpp +++ b/src/test/util/blockfilter.cpp @@ -17,12 +17,12 @@ bool ComputeFilter(BlockFilterType filter_type, const CBlockIndex& block_index, LOCK(::cs_main); CBlock block; - if (!blockman.ReadBlockFromDisk(block, block_index.GetBlockPos())) { + if (!blockman.ReadBlock(block, block_index.GetBlockPos())) { return false; } CBlockUndo block_undo; - if (block_index.nHeight > 0 && !blockman.UndoReadFromDisk(block_undo, block_index)) { + if (block_index.nHeight > 0 && !blockman.ReadBlockUndo(block_undo, block_index)) { return false; } diff --git a/src/test/validation_chainstate_tests.cpp b/src/test/validation_chainstate_tests.cpp index bf8cd819f2..7204f1688d 100644 --- a/src/test/validation_chainstate_tests.cpp +++ b/src/test/validation_chainstate_tests.cpp @@ -88,7 +88,7 @@ BOOST_FIXTURE_TEST_CASE(chainstate_update_tip, TestChain100Setup) std::shared_ptr pblockone = std::make_shared(); { LOCK(::cs_main); - chainman.m_blockman.ReadBlockFromDisk(*pblockone, *chainman.ActiveChain()[1]); + chainman.m_blockman.ReadBlock(*pblockone, *chainman.ActiveChain()[1]); } BOOST_REQUIRE(CreateAndActivateUTXOSnapshot( diff --git a/src/validation.cpp b/src/validation.cpp index 3e8a7cf520..64588e802d 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -2301,7 +2301,7 @@ DisconnectResult Chainstate::DisconnectBlock(const CBlock& block, const CBlockIn bool fClean = true; CBlockUndo blockUndo; - if (!m_blockman.UndoReadFromDisk(blockUndo, *pindex)) { + if (!m_blockman.ReadBlockUndo(blockUndo, *pindex)) { LogError("DisconnectBlock(): failure reading undo data\n"); return DISCONNECT_FAILED; } @@ -2747,7 +2747,7 @@ bool Chainstate::ConnectBlock(const CBlock& block, BlockValidationState& state, return true; } - if (!m_blockman.WriteUndoDataForBlock(blockundo, state, *pindex)) { + if (!m_blockman.WriteBlockUndo(blockundo, state, *pindex)) { return false; } @@ -3068,7 +3068,7 @@ bool 
Chainstate::DisconnectTip(BlockValidationState& state, DisconnectedBlockTra // Read block from disk. std::shared_ptr pblock = std::make_shared(); CBlock& block = *pblock; - if (!m_blockman.ReadBlockFromDisk(block, *pindexDelete)) { + if (!m_blockman.ReadBlock(block, *pindexDelete)) { LogError("DisconnectTip(): Failed to read block\n"); return false; } @@ -3179,7 +3179,7 @@ bool Chainstate::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew, std::shared_ptr pthisBlock; if (!pblock) { std::shared_ptr pblockNew = std::make_shared(); - if (!m_blockman.ReadBlockFromDisk(*pblockNew, *pindexNew)) { + if (!m_blockman.ReadBlock(*pblockNew, *pindexNew)) { return FatalError(m_chainman.GetNotifications(), state, _("Failed to read block.")); } pthisBlock = pblockNew; @@ -4564,7 +4564,7 @@ bool ChainstateManager::AcceptBlock(const std::shared_ptr& pblock, blockPos = *dbp; m_blockman.UpdateBlockInfo(block, pindex->nHeight, blockPos); } else { - blockPos = m_blockman.SaveBlockToDisk(block, pindex->nHeight); + blockPos = m_blockman.WriteBlock(block, pindex->nHeight); if (blockPos.IsNull()) { state.Error(strprintf("%s: Failed to find position to write new block to disk", __func__)); return false; @@ -4797,8 +4797,8 @@ VerifyDBResult CVerifyDB::VerifyDB( } CBlock block; // check level 0: read from disk - if (!chainstate.m_blockman.ReadBlockFromDisk(block, *pindex)) { - LogPrintf("Verification error: ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + if (!chainstate.m_blockman.ReadBlock(block, *pindex)) { + LogPrintf("Verification error: ReadBlock failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); return VerifyDBResult::CORRUPTED_BLOCK_DB; } // check level 1: verify block validity @@ -4811,7 +4811,7 @@ VerifyDBResult CVerifyDB::VerifyDB( if (nCheckLevel >= 2 && pindex) { CBlockUndo undo; if (!pindex->GetUndoPos().IsNull()) { - if (!chainstate.m_blockman.UndoReadFromDisk(undo, *pindex)) { + if (!chainstate.m_blockman.ReadBlockUndo(undo, *pindex)) { LogPrintf("Verification error: found bad undo data at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); return VerifyDBResult::CORRUPTED_BLOCK_DB; } @@ -4863,8 +4863,8 @@ VerifyDBResult CVerifyDB::VerifyDB( m_notifications.progress(_("Verifying blocks…"), percentageDone, false); pindex = chainstate.m_chain.Next(pindex); CBlock block; - if (!chainstate.m_blockman.ReadBlockFromDisk(block, *pindex)) { - LogPrintf("Verification error: ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + if (!chainstate.m_blockman.ReadBlock(block, *pindex)) { + LogPrintf("Verification error: ReadBlock failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); return VerifyDBResult::CORRUPTED_BLOCK_DB; } if (!chainstate.ConnectBlock(block, state, pindex, coins)) { @@ -4892,8 +4892,8 @@ bool Chainstate::RollforwardBlock(const CBlockIndex* pindex, CCoinsViewCache& in AssertLockHeld(cs_main); // TODO: merge with ConnectBlock CBlock block; - if (!m_blockman.ReadBlockFromDisk(block, *pindex)) { - LogError("ReplayBlock(): ReadBlockFromDisk failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); + if (!m_blockman.ReadBlock(block, *pindex)) { + LogError("ReplayBlock(): ReadBlock failed at %d, hash=%s\n", pindex->nHeight, pindex->GetBlockHash().ToString()); return false; } @@ -4950,8 +4950,8 @@ bool Chainstate::ReplayBlocks() while (pindexOld != pindexFork) { if (pindexOld->nHeight > 0) { // Never 
disconnect the genesis block. CBlock block; - if (!m_blockman.ReadBlockFromDisk(block, *pindexOld)) { - LogError("RollbackBlock(): ReadBlockFromDisk() failed at %d, hash=%s\n", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); + if (!m_blockman.ReadBlock(block, *pindexOld)) { + LogError("RollbackBlock(): ReadBlock() failed at %d, hash=%s\n", pindexOld->nHeight, pindexOld->GetBlockHash().ToString()); return false; } LogPrintf("Rolling back %s (%i)\n", pindexOld->GetBlockHash().ToString(), pindexOld->nHeight); @@ -5062,7 +5062,7 @@ bool Chainstate::LoadGenesisBlock() try { const CBlock& block = params.GenesisBlock(); - FlatFilePos blockPos{m_blockman.SaveBlockToDisk(block, 0)}; + FlatFilePos blockPos{m_blockman.WriteBlock(block, 0)}; if (blockPos.IsNull()) { LogError("%s: writing genesis block to disk failed\n", __func__); return false; @@ -5219,7 +5219,7 @@ void ChainstateManager::LoadExternalBlockFile( while (range.first != range.second) { std::multimap::iterator it = range.first; std::shared_ptr pblockrecursive = std::make_shared(); - if (m_blockman.ReadBlockFromDisk(*pblockrecursive, it->second)) { + if (m_blockman.ReadBlock(*pblockrecursive, it->second)) { LogDebug(BCLog::REINDEX, "%s: Processing out of order child %s of %s\n", __func__, pblockrecursive->GetHash().ToString(), head.ToString()); LOCK(cs_main); From d032ac80633aa6dab7244ec66edd73f4c8ed4ff2 Mon Sep 17 00:00:00 2001 From: fanquake Date: Mon, 21 Oct 2024 17:45:40 +0100 Subject: [PATCH 21/62] depends: add *FLAGS to gen_id The depends cache should be busted when flags change, the same as any other tooling change. I'd also like to start passing *FLAGS into depends inside the Guix env, which, without this change, doesn't bust the cache. --- depends/Makefile | 12 ++++++++++-- depends/gen_id | 10 +++++++++- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/depends/Makefile b/depends/Makefile index 9ba8213a92..1053c8a249 100644 --- a/depends/Makefile +++ b/depends/Makefile @@ -141,8 +141,16 @@ include packages/packages.mk # 2.
Before including packages/*.mk (excluding packages/packages.mk), since # they rely on the build_id variables # -build_id:=$(shell env CC='$(build_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(build_CXX)' CXX_STANDARD='$(CXX_STANDARD)' AR='$(build_AR)' NM='$(build_NM)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') -$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(host_CXX)' CXX_STANDARD='$(CXX_STANDARD)' AR='$(host_AR)' NM='$(host_NM)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') +build_id:=$(shell env CC='$(build_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(build_CXX)' CXX_STANDARD='$(CXX_STANDARD)' \ + AR='$(build_AR)' NM='$(build_NM)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' \ + DEBUG='$(DEBUG)' NO_HARDEN='$(NO_HARDEN)' \ + ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') + +$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' C_STANDARD='$(C_STANDARD)' CXX='$(host_CXX)' CXX_STANDARD='$(CXX_STANDARD)' \ + CPPFLAGS='$(CPPFLAGS)' CFLAGS='$(CFLAGS)' CXXFLAGS='$(CXXFLAGS)' LDFLAGS='$(LDFLAGS)' \ + AR='$(host_AR)' NM='$(host_NM)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' \ + DEBUG='$(DEBUG)' LTO='$(LTO)' NO_HARDEN='$(NO_HARDEN)' \ + ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') boost_packages_$(NO_BOOST) = $(boost_packages) diff --git a/depends/gen_id b/depends/gen_id index e2e2273b2d..d79c6da85a 100755 --- a/depends/gen_id +++ b/depends/gen_id @@ -1,8 +1,9 @@ #!/usr/bin/env bash # Usage: env [ CC=... ] [ C_STANDARD=...] [ CXX=... ] [CXX_STANDARD=...] \ +# [ CPPFLAGS=... ] [CFLAGS=...] [CXXFLAGS=...] [LDFLAGS=...] \ # [ AR=... ] [ NM=... ] [ RANLIB=... ] [ STRIP=... ] [ DEBUG=... ] \ -# [ LTO=... ] [ NO_HARDEN=... ] ./build-id [ID_SALT]... +# [ LTO=... ] [ NO_HARDEN=... ] ./gen_id [ID_SALT]... # # Prints to stdout a SHA256 hash representing the current toolset, used by # depends/Makefile as a build id for caching purposes (detecting when the @@ -34,6 +35,13 @@ echo "$@" echo "END ID SALT" + echo "BEGIN FLAGS" + echo "CPPFLAGS=${CPPFLAGS}" + echo "CFLAGS=${CFLAGS}" + echo "CXXFLAGS=${CXXFLAGS}" + echo "LDFLAGS=${LDFLAGS}" + echo "END FLAGS" + # GCC only prints COLLECT_LTO_WRAPPER when invoked with just "-v", but we want # the information from "-v -E -" as well, so just include both. echo "BEGIN CC" From 01df180bfb82c7eafac4638ced249bee4409784b Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 23 Oct 2024 17:03:32 +0100 Subject: [PATCH 22/62] depends: add mold & ld.lld to gen_id We use `lld` when cross-compiling for macOS, and its version should be tied to LLVM. However someone compiling with GCC and `-fuse-ld=lld` would not see a cache bust if the LLVM toolchain was updated. We don't use `mold` directly, but I'm aware of its usage in infrastructure, along with depends, used to test the project.
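As a rough illustration of the caching mechanism this relies on (a Python sketch, not part of gen_id; the tool list and salt handling are simplified assumptions): the depends build id is just a hash over the identifying output of every tool that can affect the build, so folding `ld.lld --version` and `mold --version` into that text means any linker upgrade yields a new id and busts the cache.

    # Sketch only: mimics the idea behind depends/gen_id in Python. Any change
    # in a tool's --version output changes the resulting id, which invalidates
    # previously cached depends output.
    import hashlib
    import subprocess

    def tool_version(cmd):
        try:
            return subprocess.run([cmd, "--version"], capture_output=True, text=True).stdout
        except FileNotFoundError:
            return f"{cmd}: not found"

    def build_id(salt=""):
        parts = [salt] + [tool_version(tool) for tool in ("ld.lld", "mold")]
        return hashlib.sha256("\n".join(parts).encode()).hexdigest()

    print(build_id("example-salt"))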
--- depends/gen_id | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/depends/gen_id b/depends/gen_id index d79c6da85a..fe6d163547 100755 --- a/depends/gen_id +++ b/depends/gen_id @@ -58,6 +58,17 @@ echo "CXX_STANDARD=${CXX_STANDARD}" echo "END CXX" + # We use lld when cross-compiling for macOS, and it's version should + # be tied to LLVM. However someone compiling with GCC and -fuse-ld=lld + # would not see a cache bust if the LLVM toolchain was updated. + echo "BEGIN lld" + bash -c "ld.lld --version" + echo "END lld" + + echo "BEGIN mold" + bash -c "mold --version" + echo "END mold" + echo "BEGIN AR" bash -c "${AR} --version" env | grep '^AR_' From 4da7bfdcc902f22239e55cd9b90abf36b29edfde Mon Sep 17 00:00:00 2001 From: brunoerg Date: Fri, 10 Jan 2025 10:54:37 -0300 Subject: [PATCH 23/62] test: add coverage for unknown address type for `createwalletdescriptor` --- test/functional/wallet_address_types.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/functional/wallet_address_types.py b/test/functional/wallet_address_types.py index e326d3e89e..f2e1850e9d 100755 --- a/test/functional/wallet_address_types.py +++ b/test/functional/wallet_address_types.py @@ -369,6 +369,8 @@ def run_test(self): assert_raises_rpc_error(-5, "Unknown address type ''", self.nodes[3].getnewaddress, None, '') assert_raises_rpc_error(-5, "Unknown address type ''", self.nodes[3].getrawchangeaddress, '') assert_raises_rpc_error(-5, "Unknown address type 'bech23'", self.nodes[3].getrawchangeaddress, 'bech23') + if self.options.descriptors: + assert_raises_rpc_error(-5, "Unknown address type 'bech23'", self.nodes[3].createwalletdescriptor, "bech23") self.log.info("Nodes with changetype=p2sh-segwit never use a P2WPKH change output") self.test_change_output_type(4, [to_address_bech32_1], 'p2sh-segwit') From e0b333682222927d64217b07bb8cfd7ff3139660 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Wed, 15 Jan 2025 03:22:19 +0100 Subject: [PATCH 24/62] test: p2p: fix sending of manual INVs in tx download test The `test_inv_block` sub-test in p2p_tx_download.py has a subtle bug: the manual msg_inv announcements from peers currently have no effect, since they don't match the wtxidrelay setting (=true by default for `P2PInterface` instances) and are hence ignored by the nodes (since 2d282e0c / PR #18044). Though the test still passes, it does so without the intended scenario of asking an additional peer (triggering the GETDATA_TX_INTERVAL delay). Fix this by sending the INV message with MSG_WTX instead of MSG_TX. This increases the test run time by about one minute. 
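A condensed sketch of the difference, using the functional test framework's message helpers (the peer and transaction objects are assumed to come from the usual P2PInterface and wallet setup): a default P2PInterface negotiates wtxidrelay, so only the wtxid-based announcement actually enters the node's transaction download logic.

    # Sketch only: contrast an announcement the node ignores with one it acts on.
    from test_framework.messages import CInv, MSG_TX, MSG_WTX, msg_inv

    def announce_tx(peer, tx):
        # Ignored: the peer negotiated wtxidrelay, so txid-based INVs are dropped.
        peer.send_and_ping(msg_inv([CInv(t=MSG_TX, h=int(tx["txid"], 16))]))
        # Acted on: a wtxid INV is what schedules a GETDATA for this peer.
        peer.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=int(tx["wtxid"], 16))]))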
--- test/functional/p2p_tx_download.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py index c69d6ff405..f6c28b00d9 100755 --- a/test/functional/p2p_tx_download.py +++ b/test/functional/p2p_tx_download.py @@ -93,11 +93,11 @@ def getdata_found(peer_index): def test_inv_block(self): self.log.info("Generate a transaction on node 0") tx = self.wallet.create_self_transfer() - txid = int(tx['txid'], 16) + wtxid = int(tx['wtxid'], 16) self.log.info( "Announce the transaction to all nodes from all {} incoming peers, but never send it".format(NUM_INBOUND)) - msg = msg_inv([CInv(t=MSG_TX, h=txid)]) + msg = msg_inv([CInv(t=MSG_WTX, h=wtxid)]) for p in self.peers: p.send_and_ping(msg) From 31a0e5f0905bfc6b22ceaaeca53466dfd74967ab Mon Sep 17 00:00:00 2001 From: fanquake Date: Fri, 30 Aug 2024 14:16:28 +0100 Subject: [PATCH 25/62] depends: Qt 5.15.16 --- depends/packages/qt.mk | 12 ++--- depends/patches/qt/fix-macos-linker.patch | 55 ----------------------- depends/patches/qt/memory_resource.patch | 33 -------------- depends/patches/qt/zlib-timebits64.patch | 31 ------------- doc/dependencies.md | 2 +- 5 files changed, 5 insertions(+), 128 deletions(-) delete mode 100644 depends/patches/qt/fix-macos-linker.patch delete mode 100644 depends/patches/qt/zlib-timebits64.patch diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 727dcfc9ef..d41ac4e784 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -1,9 +1,9 @@ package=qt -$(package)_version=5.15.14 +$(package)_version=5.15.16 $(package)_download_path=https://download.qt.io/official_releases/qt/5.15/$($(package)_version)/submodules $(package)_suffix=everywhere-opensource-src-$($(package)_version).tar.xz $(package)_file_name=qtbase-$($(package)_suffix) -$(package)_sha256_hash=500d3b390048e9538c28b5f523dfea6936f9c2e10d24ab46580ff57d430b98be +$(package)_sha256_hash=b04815058c18058b6ba837206756a2c87d1391f07a0dcb0dd314f970fd041592 $(package)_linux_dependencies=freetype fontconfig libxcb libxkbcommon libxcb_util libxcb_util_render libxcb_util_keysyms libxcb_util_image libxcb_util_wm $(package)_qt_libs=corelib network widgets gui plugins testlib $(package)_linguist_tools = lrelease lupdate lconvert @@ -17,19 +17,17 @@ $(package)_patches += no_warnings_for_symbols.patch $(package)_patches += rcc_hardcode_timestamp.patch $(package)_patches += duplicate_lcqpafonts.patch $(package)_patches += guix_cross_lib_path.patch -$(package)_patches += fix-macos-linker.patch $(package)_patches += memory_resource.patch $(package)_patches += clang_18_libpng.patch $(package)_patches += utc_from_string_no_optimize.patch $(package)_patches += windows_lto.patch $(package)_patches += darwin_no_libm.patch -$(package)_patches += zlib-timebits64.patch $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) -$(package)_qttranslations_sha256_hash=5b94d1a11b566908622fcca2f8b799744d2f8a68da20be4caa5953ed63b10489 +$(package)_qttranslations_sha256_hash=415dbbb82a75dfc9a7be969e743bee54c0e6867be37bce4cf8f03da39f20112a $(package)_qttools_file_name=qttools-$($(package)_suffix) -$(package)_qttools_sha256_hash=12061a85baf5f4de8fbc795e1d3872b706f340211b9e70962caeffc6f5e89563 +$(package)_qttools_sha256_hash=1cab11887faca54af59f4995ee435c9ad98d194e9e6889c846692c8b6815fc1c $(package)_extra_sources = $($(package)_qttranslations_file_name) $(package)_extra_sources += $($(package)_qttools_file_name) @@ -223,7 +221,6 @@ endef define $(package)_preprocess_cmds cp 
$($(package)_patch_dir)/qt.pro qt.pro && \ cp $($(package)_patch_dir)/qttools_src.pro qttools/src/src.pro && \ - patch -p1 -i $($(package)_patch_dir)/fix-macos-linker.patch && \ patch -p1 -i $($(package)_patch_dir)/dont_hardcode_pwd.patch && \ patch -p1 -i $($(package)_patch_dir)/no-xlib.patch && \ patch -p1 -i $($(package)_patch_dir)/qtbase-moc-ignore-gcc-macro.patch && \ @@ -236,7 +233,6 @@ define $(package)_preprocess_cmds patch -p1 -i $($(package)_patch_dir)/guix_cross_lib_path.patch && \ patch -p1 -i $($(package)_patch_dir)/windows_lto.patch && \ patch -p1 -i $($(package)_patch_dir)/darwin_no_libm.patch && \ - patch -p1 -i $($(package)_patch_dir)/zlib-timebits64.patch && \ mkdir -p qtbase/mkspecs/macx-clang-linux &&\ cp -f qtbase/mkspecs/macx-clang/qplatformdefs.h qtbase/mkspecs/macx-clang-linux/ &&\ cp -f $($(package)_patch_dir)/mac-qmake.conf qtbase/mkspecs/macx-clang-linux/qmake.conf && \ diff --git a/depends/patches/qt/fix-macos-linker.patch b/depends/patches/qt/fix-macos-linker.patch deleted file mode 100644 index e439685656..0000000000 --- a/depends/patches/qt/fix-macos-linker.patch +++ /dev/null @@ -1,55 +0,0 @@ -qmake: Don't error out if QMAKE_DEFAULT_LIBDIRS is empty on macOS - -The new linker in Xcode 15 doesn't provide any default linker or -framework paths when requested via -v, but still seems to use the -default paths documented in the ld man page. - -We trust that linker will do the right thing, even if we don't -know of its default linker paths. - -We also need to opt out of the default fallback logic to -set the libdirs to /lib and /usr/lib. - -This may result in UnixMakefileGenerator::findLibraries finding -different libraries than expected, if additional paths are -passed with -L, which will then take precedence for qmake, -even if the linker itself will use the library from the -SDK's default paths. This should hopefully not be an issue -in practice, as we don't turn -lFoo into absolute paths in -qmake, so the only risk is that we're picking up the wrong -prl files and adding additional dependencies that the lib -in the SDK doesn't have. - -Upstream commits: - - Qt 5.15.16: Not yet publicly available. 
- - Qt dev: cdf64b0e47115cc473e1afd1472b4b09e130b2a5 - -For other Qt branches see -https://codereview.qt-project.org/q/I2347b26e2df0828471373b0e15b8c9089274c65d - ---- old/qtbase/mkspecs/features/toolchain.prf -+++ new/qtbase/mkspecs/features/toolchain.prf -@@ -288,9 +288,12 @@ isEmpty($${target_prefix}.INCDIRS) { - } - } - } -- isEmpty(QMAKE_DEFAULT_LIBDIRS)|isEmpty(QMAKE_DEFAULT_INCDIRS): \ -+ isEmpty(QMAKE_DEFAULT_INCDIRS): \ - !integrity: \ -- error("failed to parse default search paths from compiler output") -+ error("failed to parse default include paths from compiler output") -+ isEmpty(QMAKE_DEFAULT_LIBDIRS): \ -+ !integrity:!darwin: \ -+ error("failed to parse default library paths from compiler output") - QMAKE_DEFAULT_LIBDIRS = $$unique(QMAKE_DEFAULT_LIBDIRS) - } else: ghs { - cmd = $$QMAKE_CXX $$QMAKE_CXXFLAGS -$${LITERAL_HASH} -o /tmp/fake_output /tmp/fake_input.cpp -@@ -412,7 +415,7 @@ isEmpty($${target_prefix}.INCDIRS) { - QMAKE_DEFAULT_INCDIRS = $$split(INCLUDE, $$QMAKE_DIRLIST_SEP) - } - -- unix:if(!cross_compile|host_build) { -+ unix:!darwin:if(!cross_compile|host_build) { - isEmpty(QMAKE_DEFAULT_INCDIRS): QMAKE_DEFAULT_INCDIRS = /usr/include /usr/local/include - isEmpty(QMAKE_DEFAULT_LIBDIRS): QMAKE_DEFAULT_LIBDIRS = /lib /usr/lib - } diff --git a/depends/patches/qt/memory_resource.patch b/depends/patches/qt/memory_resource.patch index 312f0669f6..14e25121c0 100644 --- a/depends/patches/qt/memory_resource.patch +++ b/depends/patches/qt/memory_resource.patch @@ -14,36 +14,3 @@ and https://bugreports.qt.io/browse/QTBUG-114316 # include # include #else - ---- a/qtbase/src/corelib/global/qcompilerdetection.h -+++ b/qtbase/src/corelib/global/qcompilerdetection.h -@@ -1055,16 +1055,22 @@ - # endif // !_HAS_CONSTEXPR - # endif // !__GLIBCXX__ && !_LIBCPP_VERSION - # endif // Q_OS_QNX --# if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) && defined(__GNUC_LIBSTD__) \ -- && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402) -+# if (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) -+# if defined(__GNUC_LIBSTD__) && ((__GNUC_LIBSTD__-0) * 100 + __GNUC_LIBSTD_MINOR__-0 <= 402) - // Apple has not updated libstdc++ since 2007, which means it does not have - // or std::move. 
Let's disable these features --# undef Q_COMPILER_INITIALIZER_LISTS --# undef Q_COMPILER_RVALUE_REFS --# undef Q_COMPILER_REF_QUALIFIERS -+# undef Q_COMPILER_INITIALIZER_LISTS -+# undef Q_COMPILER_RVALUE_REFS -+# undef Q_COMPILER_REF_QUALIFIERS - // Also disable , since it's clearly not there --# undef Q_COMPILER_ATOMICS --# endif -+# undef Q_COMPILER_ATOMICS -+# endif -+# if defined(__cpp_lib_memory_resource) \ -+ && ((defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < 140000) \ -+ || (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED < 170000)) -+# undef __cpp_lib_memory_resource // Only supported on macOS 14 and iOS 17 -+# endif -+# endif // (defined(Q_CC_CLANG) || defined(Q_CC_INTEL)) && defined(Q_OS_MAC) - # if defined(Q_CC_CLANG) && defined(Q_CC_INTEL) && Q_CC_INTEL >= 1500 - // ICC 15.x and 16.0 have their own implementation of std::atomic, which is activated when in Clang mode - // (probably because libc++'s on OS X failed to compile), but they're missing some diff --git a/depends/patches/qt/zlib-timebits64.patch b/depends/patches/qt/zlib-timebits64.patch deleted file mode 100644 index 139c1dfa77..0000000000 --- a/depends/patches/qt/zlib-timebits64.patch +++ /dev/null @@ -1,31 +0,0 @@ -From a566e156b3fa07b566ddbf6801b517a9dba04fa3 Mon Sep 17 00:00:00 2001 -From: Mark Adler -Date: Sat, 29 Jul 2023 22:13:09 -0700 -Subject: [PATCH] Avoid compiler complaints if _TIME_BITS defined when building - zlib. - -zlib does not use time_t, so _TIME_BITS is irrelevant. However it -may be defined anyway as part of a sledgehammer indiscriminately -applied to all builds. - -From https://github.com/madler/zlib/commit/a566e156b3fa07b566ddbf6801b517a9dba04fa3.patch ---- - qtbase/src/3rdparty/zlib/src/gzguts.h | 5 ++--- - 1 file changed, 2 insertions(+), 3 deletions(-) - -diff --git a/qtbase/src/3rdparty/zlib/src/gzguts.h b/qtbase/src/3rdparty/zlib/src/gzguts.h -index e23f831f5..f9375047e 100644 ---- a/qtbase/src/3rdparty/zlib/src/gzguts.h -+++ b/qtbase/src/3rdparty/zlib/src/gzguts.h -@@ -26,9 +26,8 @@ - # ifndef _LARGEFILE_SOURCE - # define _LARGEFILE_SOURCE 1 - # endif --# ifdef _FILE_OFFSET_BITS --# undef _FILE_OFFSET_BITS --# endif -+# undef _FILE_OFFSET_BITS -+# undef _TIME_BITS - #endif - - #ifdef HAVE_HIDDEN diff --git a/doc/dependencies.md b/doc/dependencies.md index ec068801a8..2163d52da3 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -29,7 +29,7 @@ You can find installation instructions in the `build-*.md` file for your platfor | [Fontconfig](../depends/packages/fontconfig.mk) | [link](https://www.freedesktop.org/wiki/Software/fontconfig/) | [2.12.6](https://github.com/bitcoin/bitcoin/pull/23495) | 2.6 | Yes | | [FreeType](../depends/packages/freetype.mk) | [link](https://freetype.org) | [2.11.0](https://github.com/bitcoin/bitcoin/commit/01544dd78ccc0b0474571da854e27adef97137fb) | 2.3.0 | Yes | | [qrencode](../depends/packages/qrencode.mk) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | | No | -| [Qt](../depends/packages/qt.mk) | [link](https://download.qt.io/official_releases/qt/) | [5.15.14](https://github.com/bitcoin/bitcoin/pull/30198) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | +| [Qt](../depends/packages/qt.mk) | [link](https://download.qt.io/official_releases/qt/) | [5.15.16](https://github.com/bitcoin/bitcoin/pull/30774) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No | ### Notifications | Dependency | Releases | Version used 
| Minimum required | Runtime | From d336b7ab85dd2b4f049f2f6fe176ffdd2621215e Mon Sep 17 00:00:00 2001 From: fanquake Date: Thu, 16 Jan 2025 11:09:56 +0000 Subject: [PATCH 26/62] Squashed 'src/leveldb/' changes from 688561cba8..04b5790928 04b5790928 Merge bitcoin-core/leveldb-subtree#46: Fix invalid pointer arithmetic in Hash (#1222) 59669817c5 Merge bitcoin-core/leveldb-subtree#40: cherry-pick: Remove leveldb::port::kLittleEndian. 73013d1a37 Merge bitcoin-core/leveldb-subtree#45: [jumbo] Add begin()/end() to Slice. a8844b23ab Fix invalid pointer arithmetic in Hash (#1222) be4dfc94b3 [jumbo] Add begin()/end() to Slice. 2e3c0131d3 Remove leveldb::port::kLittleEndian. git-subtree-dir: src/leveldb git-subtree-split: 04b57909285c7335c1908d53bcde9b90fe0439be --- CMakeLists.txt | 3 --- include/leveldb/slice.h | 3 +++ port/port_config.h.in | 6 ----- port/port_example.h | 4 ---- port/port_stdcxx.h | 2 -- util/coding.h | 50 ++++------------------------------------- util/hash.cc | 2 +- 7 files changed, 8 insertions(+), 62 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 1cb46256c2..cfd4faa325 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -28,9 +28,6 @@ option(LEVELDB_BUILD_TESTS "Build LevelDB's unit tests" ON) option(LEVELDB_BUILD_BENCHMARKS "Build LevelDB's benchmarks" ON) option(LEVELDB_INSTALL "Install LevelDB's header and library" ON) -include(TestBigEndian) -test_big_endian(LEVELDB_IS_BIG_ENDIAN) - include(CheckIncludeFile) check_include_file("unistd.h" HAVE_UNISTD_H) diff --git a/include/leveldb/slice.h b/include/leveldb/slice.h index 2df417dc31..44de9038c8 100644 --- a/include/leveldb/slice.h +++ b/include/leveldb/slice.h @@ -52,6 +52,9 @@ class LEVELDB_EXPORT Slice { // Return true iff the length of the referenced data is zero bool empty() const { return size_ == 0; } + const char* begin() const { return data(); } + const char* end() const { return data() + size(); } + // Return the ith byte in the referenced data. // REQUIRES: n < size() char operator[](size_t n) const { diff --git a/port/port_config.h.in b/port/port_config.h.in index 21273153a3..272671d39f 100644 --- a/port/port_config.h.in +++ b/port/port_config.h.in @@ -30,10 +30,4 @@ #cmakedefine01 HAVE_SNAPPY #endif // !defined(HAVE_SNAPPY) -// Define to 1 if your processor stores words with the most significant byte -// first (like Motorola and SPARC, unlike Intel and VAX). -#if !defined(LEVELDB_IS_BIG_ENDIAN) -#cmakedefine01 LEVELDB_IS_BIG_ENDIAN -#endif // !defined(LEVELDB_IS_BIG_ENDIAN) - #endif // STORAGE_LEVELDB_PORT_PORT_CONFIG_H_ \ No newline at end of file diff --git a/port/port_example.h b/port/port_example.h index 1a8fca24b3..a665910d95 100644 --- a/port/port_example.h +++ b/port/port_example.h @@ -18,10 +18,6 @@ namespace port { // TODO(jorlow): Many of these belong more in the environment class rather than // here. We should try moving them and see if it affects perf. -// The following boolean constant must be true on a little-endian machine -// and false otherwise. -static const bool kLittleEndian = true /* or some other expression */; - // ------------------ Threading ------------------- // A Mutex represents an exclusive lock. diff --git a/port/port_stdcxx.h b/port/port_stdcxx.h index e9cb0e53af..2bda48db42 100644 --- a/port/port_stdcxx.h +++ b/port/port_stdcxx.h @@ -41,8 +41,6 @@ namespace leveldb { namespace port { -static const bool kLittleEndian = !LEVELDB_IS_BIG_ENDIAN; - class CondVar; // Thinly wraps std::mutex. 
diff --git a/util/coding.h b/util/coding.h index 1983ae7173..f0bb57b8e4 100644 --- a/util/coding.h +++ b/util/coding.h @@ -48,29 +48,13 @@ int VarintLength(uint64_t v); char* EncodeVarint32(char* dst, uint32_t value); char* EncodeVarint64(char* dst, uint64_t value); -// TODO(costan): Remove port::kLittleEndian and the fast paths based on -// std::memcpy when clang learns to optimize the generic code, as -// described in https://bugs.llvm.org/show_bug.cgi?id=41761 -// -// The platform-independent code in DecodeFixed{32,64}() gets optimized to mov -// on x86 and ldr on ARM64, by both clang and gcc. However, only gcc optimizes -// the platform-independent code in EncodeFixed{32,64}() to mov / str. - // Lower-level versions of Put... that write directly into a character buffer // REQUIRES: dst has enough space for the value being written inline void EncodeFixed32(char* dst, uint32_t value) { uint8_t* const buffer = reinterpret_cast(dst); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / str (ARM) instruction. - std::memcpy(buffer, &value, sizeof(uint32_t)); - return; - } - - // Platform-independent code. - // Currently, only gcc optimizes this to a single mov / str instruction. + // Recent clang and gcc optimize this to a single mov / str instruction. buffer[0] = static_cast(value); buffer[1] = static_cast(value >> 8); buffer[2] = static_cast(value >> 16); @@ -80,15 +64,7 @@ inline void EncodeFixed32(char* dst, uint32_t value) { inline void EncodeFixed64(char* dst, uint64_t value) { uint8_t* const buffer = reinterpret_cast(dst); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / str (ARM) instruction. - std::memcpy(buffer, &value, sizeof(uint64_t)); - return; - } - - // Platform-independent code. - // Currently, only gcc optimizes this to a single mov / str instruction. + // Recent clang and gcc optimize this to a single mov / str instruction. buffer[0] = static_cast(value); buffer[1] = static_cast(value >> 8); buffer[2] = static_cast(value >> 16); @@ -105,16 +81,7 @@ inline void EncodeFixed64(char* dst, uint64_t value) { inline uint32_t DecodeFixed32(const char* ptr) { const uint8_t* const buffer = reinterpret_cast(ptr); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / ldr (ARM) instruction. - uint32_t result; - std::memcpy(&result, buffer, sizeof(uint32_t)); - return result; - } - - // Platform-independent code. - // Clang and gcc optimize this to a single mov / ldr instruction. + // Recent clang and gcc optimize this to a single mov / ldr instruction. return (static_cast(buffer[0])) | (static_cast(buffer[1]) << 8) | (static_cast(buffer[2]) << 16) | @@ -124,16 +91,7 @@ inline uint32_t DecodeFixed32(const char* ptr) { inline uint64_t DecodeFixed64(const char* ptr) { const uint8_t* const buffer = reinterpret_cast(ptr); - if (port::kLittleEndian) { - // Fast path for little-endian CPUs. All major compilers optimize this to a - // single mov (x86_64) / ldr (ARM) instruction. - uint64_t result; - std::memcpy(&result, buffer, sizeof(uint64_t)); - return result; - } - - // Platform-independent code. - // Clang and gcc optimize this to a single mov / ldr instruction. + // Recent clang and gcc optimize this to a single mov / ldr instruction. 
return (static_cast(buffer[0])) | (static_cast(buffer[1]) << 8) | (static_cast(buffer[2]) << 16) | diff --git a/util/hash.cc b/util/hash.cc index dd47c110ee..5432b6180d 100644 --- a/util/hash.cc +++ b/util/hash.cc @@ -27,7 +27,7 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) { uint32_t h = seed ^ (n * m); // Pick up four bytes at a time - while (data + 4 <= limit) { + while (limit - data >= 4) { uint32_t w = DecodeFixed32(data); data += 4; h += w; From 910a11fa66305f90b0f3a8aa9d2055b58a2d8d80 Mon Sep 17 00:00:00 2001 From: fanquake Date: Wed, 28 Aug 2024 15:28:30 +0100 Subject: [PATCH 27/62] build: remove LEVELDB_IS_BIG_ENDIAN --- cmake/leveldb.cmake | 1 - 1 file changed, 1 deletion(-) diff --git a/cmake/leveldb.cmake b/cmake/leveldb.cmake index 823a5d8e3d..2eae3a1f0a 100644 --- a/cmake/leveldb.cmake +++ b/cmake/leveldb.cmake @@ -60,7 +60,6 @@ target_compile_definitions(leveldb HAVE_FULLFSYNC=$ HAVE_O_CLOEXEC=$ FALLTHROUGH_INTENDED=[[fallthrough]] - LEVELDB_IS_BIG_ENDIAN=$ $<$>:LEVELDB_PLATFORM_POSIX> $<$:LEVELDB_PLATFORM_WINDOWS> $<$:_UNICODE;UNICODE> From 1db331ba764d27537d82abd91dde50fc9d7e0ff4 Mon Sep 17 00:00:00 2001 From: Cory Fields Date: Thu, 16 Jan 2025 21:01:40 +0000 Subject: [PATCH 28/62] init: allow a new xor key to be written if the blocksdir is newly created A subsequent commit will add a .lock file to this dir at startup, meaning that the blocksdir is never empty by the time the xor key is being read/written. Ignore all hidden files when determining if this is the first run. --- src/node/blockstorage.cpp | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 07878a5602..2d25274c86 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -1143,7 +1143,19 @@ static auto InitBlocksdirXorKey(const BlockManager::Options& opts) // size of the XOR-key file. std::array xor_key{}; - if (opts.use_xor && fs::is_empty(opts.blocks_dir)) { + // Consider this to be the first run if the blocksdir contains only hidden + // files (those which start with a .). Checking for a fully-empty dir would + // be too aggressive as a .lock file may have already been written. + bool first_run = true; + for (const auto& entry : fs::directory_iterator(opts.blocks_dir)) { + const std::string path = fs::PathToString(entry.path().filename()); + if (!entry.is_regular_file() || !path.starts_with('.')) { + first_run = false; + break; + } + } + + if (opts.use_xor && first_run) { // Only use random fresh key when the boolean option is set and on the // very first start of the program. FastRandomContext{}.fillrand(xor_key); From cabb2e5c24282c88ccc7fcaede854eaa8d7ff162 Mon Sep 17 00:00:00 2001 From: Cory Fields Date: Wed, 15 Jan 2025 18:03:43 +0000 Subject: [PATCH 29/62] refactor: introduce a more general LockDirectories for init No functional change. This is in preparation for adding additional directory locks on startup. 
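The probe-then-hold flow that this keeps, and that the next commit extends to more directories, can be sketched roughly in Python with an advisory lock on a `.lock` file; this is only an illustration of the init flow, not the actual util::LockDirectory implementation:

    # Sketch only: probe the locks early to give a clear error message, then
    # take and hold them after daemonization, mirroring the comments in init.
    import fcntl
    import os

    _held_fds = []  # kept open so the locks stay held until the process exits

    def lock_directory(dirpath, probe_only):
        fd = os.open(os.path.join(dirpath, ".lock"), os.O_RDWR | os.O_CREAT, 0o644)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
            return False  # another process is already using this directory
        if probe_only:
            fcntl.flock(fd, fcntl.LOCK_UN)
            os.close(fd)
        else:
            _held_fds.append(fd)
        return True

    def lock_directories(dirs, probe_only):
        return all(lock_directory(d, probe_only) for d in dirs)

With the follow-up commit below, the list of locked directories grows from just the data directory to the data directory plus the blocks directory.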
--- src/bitcoind.cpp | 6 +++--- src/init.cpp | 31 ++++++++++++++++------------- src/init.h | 6 +++--- src/node/interfaces.cpp | 2 +- test/functional/feature_filelock.py | 2 +- 5 files changed, 25 insertions(+), 22 deletions(-) diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index d210e2c8ba..ceb3c99410 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -228,10 +228,10 @@ static bool AppInit(NodeContext& node) return InitError(Untranslated("-daemon is not supported on this operating system")); #endif // HAVE_DECL_FORK } - // Lock data directory after daemonization - if (!AppInitLockDataDirectory()) + // Lock critical directories after daemonization + if (!AppInitLockDirectories()) { - // If locking the data directory failed, exit immediately + // If locking a directory failed, exit immediately return false; } fRet = AppInitInterfaces(node) && AppInitMain(node); diff --git a/src/init.cpp b/src/init.cpp index 9887c07165..13ef7ee13e 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1072,19 +1072,22 @@ bool AppInitParameterInteraction(const ArgsManager& args) return true; } -static bool LockDataDirectory(bool probeOnly) +static bool LockDirectory(const fs::path& dir, bool probeOnly) { - // Make sure only a single Bitcoin process is using the data directory. - const fs::path& datadir = gArgs.GetDataDirNet(); - switch (util::LockDirectory(datadir, ".lock", probeOnly)) { + // Make sure only a single process is using the directory. + switch (util::LockDirectory(dir, ".lock", probeOnly)) { case util::LockResult::ErrorWrite: - return InitError(strprintf(_("Cannot write to data directory '%s'; check permissions."), fs::PathToString(datadir))); + return InitError(strprintf(_("Cannot write to directory '%s'; check permissions."), fs::PathToString(dir))); case util::LockResult::ErrorLock: - return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running."), fs::PathToString(datadir), CLIENT_NAME)); + return InitError(strprintf(_("Cannot obtain a lock on directory %s. %s is probably already running."), fs::PathToString(dir), CLIENT_NAME)); case util::LockResult::Success: return true; } // no default case, so the compiler can warn about missing cases assert(false); } +static bool LockDirectories(bool probeOnly) +{ + return LockDirectory(gArgs.GetDataDirNet(), probeOnly); +} bool AppInitSanityChecks(const kernel::Context& kernel) { @@ -1099,19 +1102,19 @@ bool AppInitSanityChecks(const kernel::Context& kernel) return InitError(strprintf(_("Elliptic curve cryptography sanity check failure. %s is shutting down."), CLIENT_NAME)); } - // Probe the data directory lock to give an early error message, if possible - // We cannot hold the data directory lock here, as the forking for daemon() hasn't yet happened, - // and a fork will cause weird behavior to it. - return LockDataDirectory(true); + // Probe the directory locks to give an early error message, if possible + // We cannot hold the directory locks here, as the forking for daemon() hasn't yet happened, + // and a fork will cause weird behavior to them. + return LockDirectories(true); } -bool AppInitLockDataDirectory() +bool AppInitLockDirectories() { - // After daemonization get the data directory lock again and hold on to it until exit + // After daemonization get the directory locks again and hold on to them until exit // This creates a slight window for a race condition to happen, however this condition is harmless: it // will at most make us exit without printing a message to console. 
- if (!LockDataDirectory(false)) { - // Detailed error printed inside LockDataDirectory + if (!LockDirectories(false)) { + // Detailed error printed inside LockDirectory return false; } return true; diff --git a/src/init.h b/src/init.h index 6d8a35d80e..6b60a4e147 100644 --- a/src/init.h +++ b/src/init.h @@ -55,11 +55,11 @@ bool AppInitParameterInteraction(const ArgsManager& args); */ bool AppInitSanityChecks(const kernel::Context& kernel); /** - * Lock bitcoin core data directory. + * Lock bitcoin core critical directories. * @note This should only be done after daemonization. Do not call Shutdown() if this function fails. * @pre Parameters should be parsed and config file should be read, AppInitSanityChecks should have been called. */ -bool AppInitLockDataDirectory(); +bool AppInitLockDirectories(); /** * Initialize node and wallet interface pointers. Has no prerequisites or side effects besides allocating memory. */ @@ -67,7 +67,7 @@ bool AppInitInterfaces(node::NodeContext& node); /** * Bitcoin core main initialization. * @note This should only be done after daemonization. Call Shutdown() if this function fails. - * @pre Parameters should be parsed and config file should be read, AppInitLockDataDirectory should have been called. + * @pre Parameters should be parsed and config file should be read, AppInitLockDirectories should have been called. */ bool AppInitMain(node::NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info = nullptr); diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index f3b8c6a072..c084997cf7 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -116,7 +116,7 @@ class NodeImpl : public Node m_context->ecc_context = std::make_unique(); if (!AppInitSanityChecks(*m_context->kernel)) return false; - if (!AppInitLockDataDirectory()) return false; + if (!AppInitLockDirectories()) return false; if (!AppInitInterfaces(*m_context)) return false; return true; diff --git a/test/functional/feature_filelock.py b/test/functional/feature_filelock.py index f56643c62e..7c05ca4f39 100755 --- a/test/functional/feature_filelock.py +++ b/test/functional/feature_filelock.py @@ -30,7 +30,7 @@ def run_test(self): self.log.info(f"Using datadir {datadir}") self.log.info("Check that we can't start a second bitcoind instance using the same datadir") - expected_msg = f"Error: Cannot obtain a lock on data directory {datadir}. {self.config['environment']['CLIENT_NAME']} is probably already running." + expected_msg = f"Error: Cannot obtain a lock on directory {datadir}. {self.config['environment']['CLIENT_NAME']} is probably already running." self.nodes[1].assert_start_raises_init_error(extra_args=[f'-datadir={self.nodes[0].datadir_path}', '-noserver'], expected_msg=expected_msg) self.log.info("Check that cookie and PID file are not deleted when attempting to start a second bitcoind using the same datadir") From bdc0a68e676f237bcbb5195a60bb08bb34123e71 Mon Sep 17 00:00:00 2001 From: Cory Fields Date: Wed, 15 Jan 2025 19:11:33 +0000 Subject: [PATCH 30/62] init: lock blocksdir in addition to datadir This guards against 2 processes running with separate datadirs but the same blocksdir. It's not likely to happen currently, but may be more relevant in the future with applications using the kernel. Note that the kernel does not currently do any dir locking, but it should. 
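
The guard described above can be pictured with a minimal, POSIX-only sketch (this is not Bitcoin Core's `util::LockDirectory`; `LockDir` and the two-argument `LockDirectories` below are illustrative stand-ins): each critical directory gets a `.lock` file with an exclusive advisory lock held for the lifetime of the process, so a second process pointed at the same datadir or the same blocksdir fails early instead of racing on the block files.

#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

#include <string>

// Probe or take an exclusive advisory lock on <dir>/.lock.
static bool LockDir(const std::string& dir)
{
    const std::string lockfile{dir + "/.lock"};
    const int fd{open(lockfile.c_str(), O_RDWR | O_CREAT, 0644)};
    if (fd == -1) return false;               // cannot write to the directory
    if (flock(fd, LOCK_EX | LOCK_NB) == -1) { // already held by another process
        close(fd);
        return false;
    }
    // Deliberately keep fd open so the lock stays held until the process exits.
    return true;
}

// Lock every directory that must not be shared between two running nodes.
bool LockDirectories(const std::string& datadir, const std::string& blocksdir)
{
    return LockDir(datadir) && LockDir(blocksdir);
}

In the patches above the locks are first only probed (and released) in AppInitSanityChecks(), because the daemon fork has not happened yet, and then taken for real in AppInitLockDirectories().
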
--- src/init.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/init.cpp b/src/init.cpp index 13ef7ee13e..002f570e98 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -1086,7 +1086,8 @@ static bool LockDirectory(const fs::path& dir, bool probeOnly) } static bool LockDirectories(bool probeOnly) { - return LockDirectory(gArgs.GetDataDirNet(), probeOnly); + return LockDirectory(gArgs.GetDataDirNet(), probeOnly) && \ + LockDirectory(gArgs.GetBlocksDirPath(), probeOnly); } bool AppInitSanityChecks(const kernel::Context& kernel) From 2656a5658c14b43c32959db7235e9db55a17d4c8 Mon Sep 17 00:00:00 2001 From: Cory Fields Date: Thu, 16 Jan 2025 15:29:13 +0000 Subject: [PATCH 31/62] tests: add a test for the new blocksdir lock --- test/functional/feature_filelock.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/test/functional/feature_filelock.py b/test/functional/feature_filelock.py index 7c05ca4f39..aa4ca4b45a 100755 --- a/test/functional/feature_filelock.py +++ b/test/functional/feature_filelock.py @@ -27,13 +27,19 @@ def setup_network(self): def run_test(self): datadir = self.nodes[0].chain_path + blocksdir = self.nodes[0].blocks_path self.log.info(f"Using datadir {datadir}") + self.log.info(f"Using blocksdir {blocksdir}") self.log.info("Check that we can't start a second bitcoind instance using the same datadir") expected_msg = f"Error: Cannot obtain a lock on directory {datadir}. {self.config['environment']['CLIENT_NAME']} is probably already running." self.nodes[1].assert_start_raises_init_error(extra_args=[f'-datadir={self.nodes[0].datadir_path}', '-noserver'], expected_msg=expected_msg) - self.log.info("Check that cookie and PID file are not deleted when attempting to start a second bitcoind using the same datadir") + self.log.info("Check that we can't start a second bitcoind instance using the same blocksdir") + expected_msg = f"Error: Cannot obtain a lock on directory {blocksdir}. {self.config['environment']['CLIENT_NAME']} is probably already running." 
+ self.nodes[1].assert_start_raises_init_error(extra_args=[f'-blocksdir={self.nodes[0].datadir_path}', '-noserver'], expected_msg=expected_msg) + + self.log.info("Check that cookie and PID file are not deleted when attempting to start a second bitcoind using the same datadir/blocksdir") cookie_file = datadir / ".cookie" assert cookie_file.exists() # should not be deleted during the second bitcoind instance shutdown pid_file = datadir / BITCOIN_PID_FILENAME_DEFAULT From 8996fef8aebd99aa9e1dec12b0087d79c0993a84 Mon Sep 17 00:00:00 2001 From: Sebastian Falbesoner Date: Thu, 16 Jan 2025 20:09:08 +0100 Subject: [PATCH 32/62] test: p2p: check that INV messages not matching wtxidrelay are ignored --- test/functional/p2p_tx_download.py | 31 ++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py index f6c28b00d9..b1dad1d850 100755 --- a/test/functional/p2p_tx_download.py +++ b/test/functional/p2p_tx_download.py @@ -270,6 +270,36 @@ def test_rejects_filter_reset(self): node.bumpmocktime(MAX_GETDATA_INBOUND_WAIT) peer.wait_for_getdata([int(low_fee_tx['wtxid'], 16)]) + def test_inv_wtxidrelay_mismatch(self): + self.log.info("Check that INV messages that don't match the wtxidrelay setting are ignored") + node = self.nodes[0] + wtxidrelay_on_peer = node.add_p2p_connection(TestP2PConn(wtxidrelay=True)) + wtxidrelay_off_peer = node.add_p2p_connection(TestP2PConn(wtxidrelay=False)) + random_tx = self.wallet.create_self_transfer() + + # MSG_TX INV from wtxidrelay=True peer -> mismatch, ignored + wtxidrelay_on_peer.send_and_ping(msg_inv([CInv(t=MSG_TX, h=int(random_tx['txid'], 16))])) + node.setmocktime(int(time.time())) + node.bumpmocktime(MAX_GETDATA_INBOUND_WAIT) + wtxidrelay_on_peer.sync_with_ping() + assert_equal(wtxidrelay_on_peer.tx_getdata_count, 0) + + # MSG_WTX INV from wtxidrelay=False peer -> mismatch, ignored + wtxidrelay_off_peer.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=int(random_tx['wtxid'], 16))])) + node.bumpmocktime(MAX_GETDATA_INBOUND_WAIT) + wtxidrelay_off_peer.sync_with_ping() + assert_equal(wtxidrelay_off_peer.tx_getdata_count, 0) + + # MSG_TX INV from wtxidrelay=False peer works + wtxidrelay_off_peer.send_and_ping(msg_inv([CInv(t=MSG_TX, h=int(random_tx['txid'], 16))])) + node.bumpmocktime(MAX_GETDATA_INBOUND_WAIT) + wtxidrelay_off_peer.wait_for_getdata([int(random_tx['txid'], 16)]) + + # MSG_WTX INV from wtxidrelay=True peer works + wtxidrelay_on_peer.send_and_ping(msg_inv([CInv(t=MSG_WTX, h=int(random_tx['wtxid'], 16))])) + node.bumpmocktime(MAX_GETDATA_INBOUND_WAIT) + wtxidrelay_on_peer.wait_for_getdata([int(random_tx['wtxid'], 16)]) + def run_test(self): self.wallet = MiniWallet(self.nodes[0]) @@ -291,6 +321,7 @@ def run_test(self): (self.test_inv_block, True), (self.test_tx_requests, True), (self.test_rejects_filter_reset, False), + (self.test_inv_wtxidrelay_mismatch, False), ]: self.stop_nodes() self.start_nodes() From faaabfaea768deb7767c489d32fd2097fd180872 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Thu, 2 Jan 2025 14:21:07 +0100 Subject: [PATCH 33/62] ci: Bump centos stream 10 --- .cirrus.yml | 2 +- ci/test/00_setup_env_native_centos.sh | 5 ++--- ci/test/03_test_script.sh | 4 +--- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.cirrus.yml b/.cirrus.yml index b3ac31881e..9be0694369 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -140,7 +140,7 @@ task: FILE_ENV: "./ci/test/00_setup_env_win64.sh" task: - name: 'CentOS, dash, gui' + name: 
'CentOS, depends, gui' << : *GLOBAL_TASK_TEMPLATE persistent_worker: labels: diff --git a/ci/test/00_setup_env_native_centos.sh b/ci/test/00_setup_env_native_centos.sh index 878778506f..ff4b9bc570 100755 --- a/ci/test/00_setup_env_native_centos.sh +++ b/ci/test/00_setup_env_native_centos.sh @@ -7,9 +7,8 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_centos -export CI_IMAGE_NAME_TAG="quay.io/centos/centos:stream9" -export STREAM_GCC_V="12" -export CI_BASE_PACKAGES="gcc-toolset-${STREAM_GCC_V}-gcc-c++ glibc-devel gcc-toolset-${STREAM_GCC_V}-libstdc++-devel ccache make git python3 python3-pip which patch xz procps-ng dash rsync coreutils bison e2fsprogs cmake" +export CI_IMAGE_NAME_TAG="quay.io/centos/centos:stream10" +export CI_BASE_PACKAGES="gcc-c++ glibc-devel libstdc++-devel ccache make git python3 python3-pip which patch xz procps-ng ksh rsync coreutils bison e2fsprogs cmake" export PIP_PACKAGES="pyzmq" export GOAL="install" export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DBUILD_GUI=ON -DREDUCE_EXPORTS=ON" diff --git a/ci/test/03_test_script.sh b/ci/test/03_test_script.sh index 6e77a8927c..bd5c86bfbe 100755 --- a/ci/test/03_test_script.sh +++ b/ci/test/03_test_script.sh @@ -92,9 +92,7 @@ fi if [ -z "$NO_DEPENDS" ]; then if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then - SHELL_OPTS="CONFIG_SHELL=/bin/dash" - # shellcheck disable=SC1090 - source "/opt/rh/gcc-toolset-${STREAM_GCC_V}/enable" + SHELL_OPTS="CONFIG_SHELL=/bin/ksh" # Temporarily use ksh instead of dash, until https://bugzilla.redhat.com/show_bug.cgi?id=2335416 is fixed. else SHELL_OPTS="CONFIG_SHELL=" fi From 6e29de21010fc5213176a6ba29f754ca72612ea0 Mon Sep 17 00:00:00 2001 From: David Gumberg Date: Mon, 13 Jan 2025 08:39:07 -0800 Subject: [PATCH 34/62] ci: Supply `platform` argument to docker commands. Later versions of docker require a `--platform` argument when building and running from a platform-specific image that differs from the host platform. --- ci/test/00_setup_env.sh | 3 +++ ci/test/00_setup_env_arm.sh | 3 ++- ci/test/00_setup_env_i686_multiprocess.sh | 3 ++- ci/test/00_setup_env_s390x.sh | 3 ++- ci/test/00_setup_env_win64.sh | 3 ++- ci/test/02_run_container.sh | 4 +++- 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh index d7d527c7f6..8163680ca9 100755 --- a/ci/test/00_setup_env.sh +++ b/ci/test/00_setup_env.sh @@ -68,3 +68,6 @@ export CI_BASE_PACKAGES=${CI_BASE_PACKAGES:-build-essential pkgconf curl ca-cert export GOAL=${GOAL:-install} export DIR_QA_ASSETS=${DIR_QA_ASSETS:-${BASE_SCRATCH_DIR}/qa-assets} export CI_RETRY_EXE=${CI_RETRY_EXE:-"retry --"} + +# The --platform argument used with `docker build` and `docker run`. 
+export CI_IMAGE_PLATFORM=${CI_IMAGE_PLATFORM:-"linux"} # Force linux, but use native arch by default diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh index 749ae86cb2..67e43aca0d 100755 --- a/ci/test/00_setup_env_arm.sh +++ b/ci/test/00_setup_env_arm.sh @@ -10,7 +10,8 @@ export HOST=arm-linux-gnueabihf export DPKG_ADD_ARCH="armhf" export PACKAGES="python3-zmq g++-arm-linux-gnueabihf busybox libc6:armhf libstdc++6:armhf libfontconfig1:armhf libxcb1:armhf" export CONTAINER_NAME=ci_arm_linux -export CI_IMAGE_NAME_TAG="docker.io/arm64v8/debian:bookworm" # Check that https://packages.debian.org/bookworm/g++-arm-linux-gnueabihf (version 12.2, similar to guix) can cross-compile +export CI_IMAGE_NAME_TAG="docker.io/debian:bookworm" # Check that https://packages.debian.org/bookworm/g++-arm-linux-gnueabihf (version 12.2, similar to guix) can cross-compile +export CI_IMAGE_PLATFORM="linux/arm64" export USE_BUSY_BOX=true export RUN_UNIT_TESTS=true export RUN_FUNCTIONAL_TESTS=false diff --git a/ci/test/00_setup_env_i686_multiprocess.sh b/ci/test/00_setup_env_i686_multiprocess.sh index 5810ae8639..e659486407 100755 --- a/ci/test/00_setup_env_i686_multiprocess.sh +++ b/ci/test/00_setup_env_i686_multiprocess.sh @@ -8,7 +8,8 @@ export LC_ALL=C.UTF-8 export HOST=i686-pc-linux-gnu export CONTAINER_NAME=ci_i686_multiprocess -export CI_IMAGE_NAME_TAG="docker.io/amd64/ubuntu:24.04" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" +export CI_IMAGE_PLATFORM="linux/amd64" export PACKAGES="llvm clang g++-multilib" export DEP_OPTS="DEBUG=1 MULTIPROCESS=1" export GOAL="install" diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh index eb12bc6fd1..0e0ddf3382 100755 --- a/ci/test/00_setup_env_s390x.sh +++ b/ci/test/00_setup_env_s390x.sh @@ -9,7 +9,8 @@ export LC_ALL=C.UTF-8 export HOST=s390x-linux-gnu export PACKAGES="python3-zmq" export CONTAINER_NAME=ci_s390x -export CI_IMAGE_NAME_TAG="docker.io/s390x/ubuntu:24.04" +export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04" +export CI_IMAGE_PLATFORM="linux/s390x" export TEST_RUNNER_EXTRA="--exclude rpc_bind,feature_bind_extra" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 export RUN_FUNCTIONAL_TESTS=true export GOAL="install" diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh index 25da64c524..985b2941bf 100755 --- a/ci/test/00_setup_env_win64.sh +++ b/ci/test/00_setup_env_win64.sh @@ -7,7 +7,8 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_win64 -export CI_IMAGE_NAME_TAG="docker.io/amd64/debian:bookworm" # Check that https://packages.debian.org/bookworm/g++-mingw-w64-x86-64-posix (version 12.2, similar to guix) can cross-compile +export CI_IMAGE_NAME_TAG="docker.io/debian:bookworm" # Check that https://packages.debian.org/bookworm/g++-mingw-w64-x86-64-posix (version 12.2, similar to guix) can cross-compile +export CI_IMAGE_PLATFORM="linux/amd64" export HOST=x86_64-w64-mingw32 export DPKG_ADD_ARCH="i386" export PACKAGES="nsis g++-mingw-w64-x86-64-posix wine-binfmt wine64 wine32 file" diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh index ce01db325c..28d0898a4a 100755 --- a/ci/test/02_run_container.sh +++ b/ci/test/02_run_container.sh @@ -14,7 +14,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then # Though, exclude those with newlines to avoid parsing problems. 
python3 -c 'import os; [print(f"{key}={value}") for key, value in os.environ.items() if "\n" not in value and "HOME" != key and "PATH" != key and "USER" != key]' | tee "/tmp/env-$USER-$CONTAINER_NAME" # System-dependent env vars must be kept as is. So read them from the container. - docker run --rm "${CI_IMAGE_NAME_TAG}" bash -c "env | grep --extended-regexp '^(HOME|PATH|USER)='" | tee --append "/tmp/env-$USER-$CONTAINER_NAME" + docker run --platform="${CI_IMAGE_PLATFORM}" --rm "${CI_IMAGE_NAME_TAG}" bash -c "env | grep --extended-regexp '^(HOME|PATH|USER)='" | tee --append "/tmp/env-$USER-$CONTAINER_NAME" # Env vars during the build can not be changed. For example, a modified # $MAKEJOBS is ignored in the build process. Use --cpuset-cpus as an @@ -31,6 +31,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \ --build-arg "FILE_ENV=${FILE_ENV}" \ $MAYBE_CPUSET \ + --platform="${CI_IMAGE_PLATFORM}" \ --label="${CI_IMAGE_LABEL}" \ --tag="${CONTAINER_NAME}" \ "${BASE_READ_ONLY_DIR}" @@ -100,6 +101,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then --env-file /tmp/env-$USER-$CONTAINER_NAME \ --name "$CONTAINER_NAME" \ --network ci-ip6net \ + --platform="${CI_IMAGE_PLATFORM}" \ "$CONTAINER_NAME") export CI_CONTAINER_ID export CI_EXEC_CMD_PREFIX="docker exec ${CI_CONTAINER_ID}" From d3339a7cd5f10e8a6b2a6c64daa66cd71a3e81d3 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Mon, 20 Jan 2025 14:32:20 +0100 Subject: [PATCH 35/62] util: fix compiler warning about deprecated space before _MiB ``` src/util/byte_units.h:13:29: error: identifier '_MiB' preceded by whitespace in a literal operator declaration is deprecated [-Werror,-Wdeprecated-literal-operator] 13 | constexpr size_t operator"" _MiB(unsigned long long mebibytes) | ~~~~~~~~~~~^~~~ | operator""_MiB 1 error generated. ``` Clang 20.0.0 --- src/util/byte_units.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/util/byte_units.h b/src/util/byte_units.h index 894f057a7e..3d6a9627d8 100644 --- a/src/util/byte_units.h +++ b/src/util/byte_units.h @@ -10,7 +10,7 @@ #include //! Overflow-safe conversion of MiB to bytes. -constexpr size_t operator"" _MiB(unsigned long long mebibytes) +constexpr size_t operator""_MiB(unsigned long long mebibytes) { auto bytes{CheckedLeftShift(mebibytes, 20)}; if (!bytes || *bytes > std::numeric_limits::max()) { From fa9593efc2e19d9dfa9a62c8a3d796aad888baa0 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Tue, 21 Jan 2025 11:26:55 +0100 Subject: [PATCH 36/62] test: Use high-level python types Using the built-in open() and pathlib is identical and requires less code. Also, remove redundant sync_blocks call. --- .../feature_remove_pruned_files_on_startup.py | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/test/functional/feature_remove_pruned_files_on_startup.py b/test/functional/feature_remove_pruned_files_on_startup.py index 2e689f2920..4116ee1a80 100755 --- a/test/functional/feature_remove_pruned_files_on_startup.py +++ b/test/functional/feature_remove_pruned_files_on_startup.py @@ -1,13 +1,13 @@ #!/usr/bin/env python3 -# Copyright (c) 2022 The Bitcoin Core developers +# Copyright (c) 2022-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test removing undeleted pruned blk files on startup.""" import platform -import os from test_framework.test_framework import BitcoinTestFramework + class FeatureRemovePrunedFilesOnStartupTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 @@ -18,7 +18,6 @@ def mine_batches(self, blocks): for _ in range(n): self.generate(self.nodes[0], 250) self.generate(self.nodes[0], blocks % 250) - self.sync_blocks() def run_test(self): blk0 = self.nodes[0].blocks_path / "blk00000.dat" @@ -26,30 +25,31 @@ def run_test(self): blk1 = self.nodes[0].blocks_path / "blk00001.dat" rev1 = self.nodes[0].blocks_path / "rev00001.dat" self.mine_batches(800) - fo1 = os.open(blk0, os.O_RDONLY) - fo2 = os.open(rev1, os.O_RDONLY) - fd1 = os.fdopen(fo1) - fd2 = os.fdopen(fo2) + + self.log.info("Open some files to check that this may delay deletion") + fd1 = open(blk0, "rb") + fd2 = open(rev1, "rb") self.nodes[0].pruneblockchain(600) # Windows systems will not remove files with an open fd if platform.system() != 'Windows': - assert not os.path.exists(blk0) - assert not os.path.exists(rev0) - assert not os.path.exists(blk1) - assert not os.path.exists(rev1) + assert not blk0.exists() + assert not rev0.exists() + assert not blk1.exists() + assert not rev1.exists() else: - assert os.path.exists(blk0) - assert not os.path.exists(rev0) - assert not os.path.exists(blk1) - assert os.path.exists(rev1) + assert blk0.exists() + assert not rev0.exists() + assert not blk1.exists() + assert rev1.exists() - # Check that the files are removed on restart once the fds are closed + self.log.info("Check that the files are removed on restart once the fds are closed") fd1.close() fd2.close() self.restart_node(0) - assert not os.path.exists(blk0) - assert not os.path.exists(rev1) + assert not blk0.exists() + assert not rev1.exists() + if __name__ == '__main__': FeatureRemovePrunedFilesOnStartupTest(__file__).main() From d44626a9c2eb95b434edc7a22a70f2cce63c9d84 Mon Sep 17 00:00:00 2001 From: Hennadii Stepanov <32963518+hebasto@users.noreply.github.com> Date: Tue, 21 Jan 2025 10:39:11 +0000 Subject: [PATCH 37/62] depends: Override default build type for `libevent` The `libevent` package defaults to the "Release" build type, which overrides our per-build-type optimization flags with `-O3`. To prevent this behavior, set `CMAKE_BUILD_TYPE` to "None", consistent with how other packages are handled. --- depends/packages/libevent.mk | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/depends/packages/libevent.mk b/depends/packages/libevent.mk index 9507fe9ddf..14ad24a1de 100644 --- a/depends/packages/libevent.mk +++ b/depends/packages/libevent.mk @@ -10,9 +10,10 @@ $(package)_build_subdir=build # version as we do in releases. Due to quirks in libevents build system, this # is also required to enable support for ipv6. See #19375. 
define $(package)_set_vars - $(package)_config_opts=-DEVENT__DISABLE_BENCHMARK=ON -DEVENT__DISABLE_OPENSSL=ON + $(package)_config_opts=-DCMAKE_BUILD_TYPE=None -DEVENT__DISABLE_BENCHMARK=ON -DEVENT__DISABLE_OPENSSL=ON $(package)_config_opts+=-DEVENT__DISABLE_SAMPLES=ON -DEVENT__DISABLE_REGRESS=ON $(package)_config_opts+=-DEVENT__DISABLE_TESTS=ON -DEVENT__LIBRARY_TYPE=STATIC + $(package)_cflags += -ffile-prefix-map=$($(package)_extract_dir)=/usr $(package)_cppflags += -D_GNU_SOURCE $(package)_cppflags_mingw32=-D_WIN32_WINNT=0x0A00 From 1111b0ac196d9d26c89c9f932c9784a99cdb218d Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Tue, 21 Jan 2025 15:35:13 +0100 Subject: [PATCH 38/62] ci: Add missing --combinedlogslen to test-each-commit task --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3e9f885d13..0b4b8bae1e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,4 @@ -# Copyright (c) 2023 The Bitcoin Core developers +# Copyright (c) 2023-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. @@ -72,7 +72,7 @@ jobs: run: | # Run tests on commits after the last merge commit and before the PR head commit # Use clang++, because it is a bit faster and uses less memory than g++ - git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && CC=clang CXX=clang++ cmake -B build -DWERROR=ON -DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON -DWITH_BDB=ON -DWITH_USDT=ON -DCMAKE_CXX_FLAGS='-Wno-error=unused-member-function' && cmake --build build -j $(nproc) && ctest --output-on-failure --stop-on-failure --test-dir build -j $(nproc) && ./build/test/functional/test_runner.py -j $(( $(nproc) * 2 ))" ${{ env.TEST_BASE }} + git rebase --exec "echo Running test-one-commit on \$( git log -1 ) && CC=clang CXX=clang++ cmake -B build -DWERROR=ON -DWITH_ZMQ=ON -DBUILD_GUI=ON -DBUILD_BENCH=ON -DBUILD_FUZZ_BINARY=ON -DWITH_BDB=ON -DWITH_USDT=ON -DCMAKE_CXX_FLAGS='-Wno-error=unused-member-function' && cmake --build build -j $(nproc) && ctest --output-on-failure --stop-on-failure --test-dir build -j $(nproc) && ./build/test/functional/test_runner.py -j $(( $(nproc) * 2 )) --combinedlogslen=99999999" ${{ env.TEST_BASE }} macos-native-arm64: name: ${{ matrix.job-name }} From fa80a7dac4be8a1d8e32d88e46da23f14e06bade Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Tue, 21 Jan 2025 15:36:32 +0100 Subject: [PATCH 39/62] test: Bump sync_mempools timeout in p2p_1p1c_network.py --- test/functional/p2p_1p1c_network.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/functional/p2p_1p1c_network.py b/test/functional/p2p_1p1c_network.py index eef307d1eb..2f6e3eeb70 100755 --- a/test/functional/p2p_1p1c_network.py +++ b/test/functional/p2p_1p1c_network.py @@ -150,7 +150,7 @@ def run_test(self): assert_equal(submitpackage_result["package_msg"], "success") self.log.info("Wait for mempools to sync") - self.sync_mempools(timeout=20) + self.sync_mempools() if __name__ == '__main__': From 6d11c9c60b5b4a71f8f75cb4eee2f9d0021de310 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Tue, 10 Sep 2024 15:50:08 -0400 Subject: [PATCH 40/62] descriptor: Add proper Clone function to miniscript::Node Multipath descriptors requires performing a deep copy, so a Clone function that does that is added to 
miniscript::Node instead of the current shallow copy. Co-Authored-By: Antoine Poinsot --- src/script/descriptor.cpp | 4 ++-- src/script/miniscript.h | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 499af47ee5..c8802d2bf8 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -1353,7 +1353,7 @@ class MiniscriptDescriptor final : public DescriptorImpl for (const auto& arg : m_pubkey_args) { providers.push_back(arg->Clone()); } - return std::make_unique(std::move(providers), miniscript::MakeNodeRef(*m_node)); + return std::make_unique(std::move(providers), m_node->Clone()); } }; @@ -2143,7 +2143,7 @@ std::vector> ParseScript(uint32_t& key_exp_index for (auto& pub : parser.m_keys) { pubs.emplace_back(std::move(pub.at(i))); } - ret.emplace_back(std::make_unique(std::move(pubs), node)); + ret.emplace_back(std::make_unique(std::move(pubs), node->Clone())); } return ret; } diff --git a/src/script/miniscript.h b/src/script/miniscript.h index 75f978457c..dad744aebb 100644 --- a/src/script/miniscript.h +++ b/src/script/miniscript.h @@ -528,6 +528,20 @@ struct Node { } } + NodeRef Clone() const + { + // Use TreeEval() to avoid a stack-overflow due to recursion + auto upfn = [](const Node& node, Span> children) { + std::vector> new_subs; + for (auto child = children.begin(); child != children.end(); ++child) { + new_subs.emplace_back(std::move(*child)); + } + // std::make_unique (and therefore MakeNodeRef) doesn't work on private constructors + return std::unique_ptr{new Node{internal::NoDupCheck{}, node.m_script_ctx, node.fragment, std::move(new_subs), node.keys, node.data, node.k}}; + }; + return TreeEval>(upfn); + } + private: //! Cached ops counts. const internal::Ops ops; @@ -546,6 +560,11 @@ struct Node { //! for all subnodes as well. mutable std::optional has_duplicate_keys; + // Constructor which takes all of the data that a Node could possibly contain. + // This is kept private as no valid fragment has all of these arguments. + // Only used by Clone() + Node(internal::NoDupCheck, MiniscriptContext script_ctx, Fragment nt, std::vector> sub, std::vector key, std::vector arg, uint32_t val) + : fragment(nt), k(val), keys(key), data(std::move(arg)), subs(std::move(sub)), m_script_ctx{script_ctx}, ops(CalcOps()), ss(CalcStackSize()), ws(CalcWitnessSize()), typ(CalcType()), scriptlen(CalcScriptLen()) {} //! Compute the length of the script for this miniscript (including children). 
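
As an aside, the ownership and cloning pattern introduced here can be shown with a small self-contained sketch (a generic `TreeNode`, not `miniscript::Node`): once children are held by `unique_ptr`, a tree can no longer be shared or shallow-copied, so a deep `Clone()` is needed, and walking the tree with an explicit work stack rather than recursion avoids blowing the call stack on deeply nested trees, in the same spirit as the `TreeEval()`-based implementation above.

#include <memory>
#include <utility>
#include <vector>

struct TreeNode {
    int value{};
    std::vector<std::unique_ptr<TreeNode>> children;
};

std::unique_ptr<TreeNode> Clone(const TreeNode& root)
{
    auto copy{std::make_unique<TreeNode>()};
    copy->value = root.value;
    // Work stack of (source node, already-created destination node) pairs.
    std::vector<std::pair<const TreeNode*, TreeNode*>> todo{{&root, copy.get()}};
    while (!todo.empty()) {
        const auto [src, dst]{todo.back()};
        todo.pop_back();
        for (const auto& child : src->children) {
            dst->children.push_back(std::make_unique<TreeNode>());
            dst->children.back()->value = child->value;
            todo.emplace_back(child.get(), dst->children.back().get());
        }
    }
    return copy;
}

With the copy constructor deleted in a follow-up commit, accidentally sharing subtrees between the per-path descriptors becomes a compile error rather than a silent aliasing bug.
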
size_t CalcScriptLen() const { From 9ccb46f91ac231d55a85bf2f61bd2f3276b54b01 Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 30 Dec 2024 16:15:13 -0500 Subject: [PATCH 41/62] miniscript: Ensure there is no NodeRef copy constructor or assignment operator --- src/script/miniscript.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/script/miniscript.h b/src/script/miniscript.h index dad744aebb..fbd10ff58e 100644 --- a/src/script/miniscript.h +++ b/src/script/miniscript.h @@ -1682,6 +1682,10 @@ struct Node { : Node(internal::NoDupCheck{}, ctx.MsContext(), nt, std::move(sub), val) { DuplicateKeyCheck(ctx); } template Node(const Ctx& ctx, Fragment nt, uint32_t val = 0) : Node(internal::NoDupCheck{}, ctx.MsContext(), nt, val) { DuplicateKeyCheck(ctx); } + + // Delete copy constructor and assignment operator, use Clone() instead + Node(const Node&) = delete; + Node& operator=(const Node&) = delete; }; namespace internal { From 09a1875ad8cddeb17c19af34b8282d37fed0937e Mon Sep 17 00:00:00 2001 From: Ava Chow Date: Mon, 30 Dec 2024 16:14:39 -0500 Subject: [PATCH 42/62] miniscript: Make NodeRef a unique_ptr There's no need for it to be a shared_ptr. --- src/script/miniscript.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/script/miniscript.h b/src/script/miniscript.h index fbd10ff58e..03f0a7c365 100644 --- a/src/script/miniscript.h +++ b/src/script/miniscript.h @@ -189,11 +189,11 @@ inline consteval Type operator""_mst(const char* c, size_t l) using Opcode = std::pair>; template struct Node; -template using NodeRef = std::shared_ptr>; +template using NodeRef = std::unique_ptr>; -//! Construct a miniscript node as a shared_ptr. +//! Construct a miniscript node as a unique_ptr. template -NodeRef MakeNodeRef(Args&&... args) { return std::make_shared>(std::forward(args)...); } +NodeRef MakeNodeRef(Args&&... args) { return std::make_unique>(std::forward(args)...); } //! The different node types in miniscript. enum class Fragment { From 66d21d0eb6517e04ebfb9fad4085e788de51b4dc Mon Sep 17 00:00:00 2001 From: Antoine Poinsot Date: Thu, 7 Nov 2024 10:52:45 -0500 Subject: [PATCH 43/62] qa: check parsed multipath descriptors dont share references --- src/test/descriptor_tests.cpp | 39 +++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp index 1b4020e0a0..63c53a842c 100644 --- a/src/test/descriptor_tests.cpp +++ b/src/test/descriptor_tests.cpp @@ -448,6 +448,20 @@ void CheckMultipath(const std::string& prv, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, /*preimages=*/{}, expanded_prvs.at(i), expanded_pubs.at(i), i); } + + // The descriptor for each path must be standalone. They should not share common references. Test this + // by parsing a multipath descriptor expression, deallocating all but one of the descriptors and making + // sure we can perform operations on it. 
+ FlatSigningProvider prov, out; + std::string error; + const auto desc{[&](){ + auto parsed{Parse(pub, prov, error)}; + assert(parsed.size() > 1); + return std::move(parsed.at(0)); + }()}; + desc->ToString(); + std::vector out_scripts; + desc->Expand(0, prov, out_scripts, out); } void CheckInferDescriptor(const std::string& script_hex, const std::string& expected_desc, const std::vector& hex_scripts, const std::vector>& origin_pubkeys) @@ -851,6 +865,31 @@ BOOST_AUTO_TEST_CASE(descriptor_test) {{3}, {}}, } ); + CheckMultipath("tr(xprv9yYge4PS54XkYT9KiLfCRwc8Jeuz8DucxQGtuEecJZYhKNiqbPxYHTPzXtskmzWBqdqkRAGsghNmZzNsfU2wstaB3XjDQFPv567aQSSuPyo/<2;3>,l:pk(xprvA1ADjaN8H3HGnZSmt4VF7YdWoV9oNq8jhqhurxsrYycBAFK555cECoaY22KWt6BTRNLuvobW5VQTF89PN3iA485LAg7epazevPyjCa4xTzd))", + "tr(xpub6CY33ZvKuS63kwDnpNCCo5YrrgkUXgdUKdCVhd4Dru5gCB3z8wGnqFiUP98Za5pYSYF5KmvBHTY3Ra8FAJGggzBjuHS69WzN8gscPupuZwK/<2;3>,l:pk(xpub6E9a95u27Qqa13XEz62FUgaFMWzHnHrb54dWfMHU7K9A33eDccvUkbu1sHYoByHAgJdR326rWqn9pGZgZHz1afDprW5gGwS4gUX8Ri6aGPZ))", + { + "tr(xprv9yYge4PS54XkYT9KiLfCRwc8Jeuz8DucxQGtuEecJZYhKNiqbPxYHTPzXtskmzWBqdqkRAGsghNmZzNsfU2wstaB3XjDQFPv567aQSSuPyo/2,l:pk(xprvA1ADjaN8H3HGnZSmt4VF7YdWoV9oNq8jhqhurxsrYycBAFK555cECoaY22KWt6BTRNLuvobW5VQTF89PN3iA485LAg7epazevPyjCa4xTzd))", + "tr(xprv9yYge4PS54XkYT9KiLfCRwc8Jeuz8DucxQGtuEecJZYhKNiqbPxYHTPzXtskmzWBqdqkRAGsghNmZzNsfU2wstaB3XjDQFPv567aQSSuPyo/3,l:pk(xprvA1ADjaN8H3HGnZSmt4VF7YdWoV9oNq8jhqhurxsrYycBAFK555cECoaY22KWt6BTRNLuvobW5VQTF89PN3iA485LAg7epazevPyjCa4xTzd))", + }, + { + "tr(xpub6CY33ZvKuS63kwDnpNCCo5YrrgkUXgdUKdCVhd4Dru5gCB3z8wGnqFiUP98Za5pYSYF5KmvBHTY3Ra8FAJGggzBjuHS69WzN8gscPupuZwK/2,l:pk(xpub6E9a95u27Qqa13XEz62FUgaFMWzHnHrb54dWfMHU7K9A33eDccvUkbu1sHYoByHAgJdR326rWqn9pGZgZHz1afDprW5gGwS4gUX8Ri6aGPZ))", + "tr(xpub6CY33ZvKuS63kwDnpNCCo5YrrgkUXgdUKdCVhd4Dru5gCB3z8wGnqFiUP98Za5pYSYF5KmvBHTY3Ra8FAJGggzBjuHS69WzN8gscPupuZwK/3,l:pk(xpub6E9a95u27Qqa13XEz62FUgaFMWzHnHrb54dWfMHU7K9A33eDccvUkbu1sHYoByHAgJdR326rWqn9pGZgZHz1afDprW5gGwS4gUX8Ri6aGPZ))", + }, + { + "tr(xpub6CY33ZvKuS63kwDnpNCCo5YrrgkUXgdUKdCVhd4Dru5gCB3z8wGnqFiUP98Za5pYSYF5KmvBHTY3Ra8FAJGggzBjuHS69WzN8gscPupuZwK/2,l:pk(xpub6E9a95u27Qqa13XEz62FUgaFMWzHnHrb54dWfMHU7K9A33eDccvUkbu1sHYoByHAgJdR326rWqn9pGZgZHz1afDprW5gGwS4gUX8Ri6aGPZ))", + "tr(xpub6CY33ZvKuS63kwDnpNCCo5YrrgkUXgdUKdCVhd4Dru5gCB3z8wGnqFiUP98Za5pYSYF5KmvBHTY3Ra8FAJGggzBjuHS69WzN8gscPupuZwK/3,l:pk(xpub6E9a95u27Qqa13XEz62FUgaFMWzHnHrb54dWfMHU7K9A33eDccvUkbu1sHYoByHAgJdR326rWqn9pGZgZHz1afDprW5gGwS4gUX8Ri6aGPZ))", + }, + XONLY_KEYS, + { + {{"51200e3c14456bfa30f9f0bed6e55f35e1e9ca17c835e9f71b25bac0dfaab38ff2cd"}}, + {{"51202bdda29337ecaf8fcd5aa395febac6f99b8a866a0e8fb3f7bde2e24b1a7df2ba"}}, + }, + OutputType::BECH32M, + { + {{2}, {}}, + {{3}, {}}, + } + ); CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/<0;1>/<2;3>)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/<0;1>/<2;3>)", "pkh(): Multiple multipath key path specifiers found"); CheckUnparsable("pkh([deadbeef/<0;1>]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/0)", "pkh([deadbeef/<0;1>]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/0)", "pkh(): Key path value \'<0;1>\' specifies multipath in a section where multipath is not allowed"); 
CheckUnparsable("tr(xprv9s21ZrQH143K2Zu2kTVKcQi9nKhfgJUkYqG73wXsHuhATm1wkt6kcSZeTYEw2PL7krZtJopEYDvBdYWdAai3n3TWUTCVfHvPHqTYJv7smYe/6/*,{pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/<1;2;3>/0/*),pk(xprv9s21ZrQH143K3jUwNHoqQNrtzJnJmx4Yup8NkNLdVQCymYbPbJXnPhwkfTfxZfptcs3rLAPUXS39oDLgrNKQGwbGsEmJJ8BU3RzQuvShEG4/0/0/<3;4>/*)})", "tr(xpub6B4sSbNr8XFYXqqKB7PeUemqgEaVtCLjgd5Lf2VYtezSHozC7ffCvVNCyu9TCgHntRQdimjV3tHbxmNfocxtuh6saNtZEw91gjXLRhQ3Yar/6/*,{pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/<1;2;3>/0/*),pk(xpub6AhFhZJJGt9YB8i85RfrJ8jT3T2FF5EejDCXqXfm1DAczFEXkk8HD3CXTg2TmKM8wTbSnSw3wPg5JuyLitUrpRmkjn2BQXyZnqJx16AGy94/0/0/<3;4>/*)})", "tr(): Multipath subscripts have mismatched lengths"); From fa9aced8006bd9b10b9239b43140c37162c9d8f2 Mon Sep 17 00:00:00 2001 From: MarcoFalke <*~=`'#}+{/-|&$^_@721217.xyz> Date: Tue, 21 Jan 2025 12:07:44 +0100 Subject: [PATCH 44/62] test: Check that reindex with prune wipes blk files --- .../feature_remove_pruned_files_on_startup.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/test/functional/feature_remove_pruned_files_on_startup.py b/test/functional/feature_remove_pruned_files_on_startup.py index 4116ee1a80..a6fc49b256 100755 --- a/test/functional/feature_remove_pruned_files_on_startup.py +++ b/test/functional/feature_remove_pruned_files_on_startup.py @@ -2,10 +2,11 @@ # Copyright (c) 2022-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test removing undeleted pruned blk files on startup.""" +"""Tests around pruning rev and blk files on startup.""" import platform from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal class FeatureRemovePrunedFilesOnStartupTest(BitcoinTestFramework): @@ -50,6 +51,22 @@ def run_test(self): assert not blk0.exists() assert not rev1.exists() + self.log.info("Check that a reindex will wipe all files") + + def ls_files(): + ls = [ + entry.name + for entry in self.nodes[0].blocks_path.iterdir() + if entry.is_file() and any(map(entry.name.startswith, ["rev", "blk"])) + ] + return sorted(ls) + + assert_equal(len(ls_files()), 4) + self.restart_node(0, extra_args=self.extra_args[0] + ["-reindex"]) + assert_equal(self.nodes[0].getblockcount(), 0) + self.stop_node(0) # Stop node to flush the two newly created files + assert_equal(ls_files(), ["blk00000.dat", "rev00000.dat"]) + if __name__ == '__main__': FeatureRemovePrunedFilesOnStartupTest(__file__).main() From c4cc9e3e9df2733260942e0513dd8478d2a104da Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Fri, 3 Jan 2025 10:07:38 +0100 Subject: [PATCH 45/62] consensus: add DeriveTarget() to pow.h Split CheckProofOfWorkImpl() to introduce a helper function DeriveTarget() which converts the nBits value to the target. The function takes pow_limit as an argument so later commits can avoid having to pass ChainstateManager through the call stack. 
Co-authored-by: Ryan Ofsky --- src/pow.cpp | 14 +++++++++++--- src/pow.h | 12 ++++++++++++ 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/src/pow.cpp b/src/pow.cpp index bbcf39b593..686b177fe3 100644 --- a/src/pow.cpp +++ b/src/pow.cpp @@ -143,7 +143,7 @@ bool CheckProofOfWork(uint256 hash, unsigned int nBits, const Consensus::Params& return CheckProofOfWorkImpl(hash, nBits, params); } -bool CheckProofOfWorkImpl(uint256 hash, unsigned int nBits, const Consensus::Params& params) +std::optional DeriveTarget(unsigned int nBits, const uint256 pow_limit) { bool fNegative; bool fOverflow; @@ -152,8 +152,16 @@ bool CheckProofOfWorkImpl(uint256 hash, unsigned int nBits, const Consensus::Par bnTarget.SetCompact(nBits, &fNegative, &fOverflow); // Check range - if (fNegative || bnTarget == 0 || fOverflow || bnTarget > UintToArith256(params.powLimit)) - return false; + if (fNegative || bnTarget == 0 || fOverflow || bnTarget > UintToArith256(pow_limit)) + return {}; + + return bnTarget; +} + +bool CheckProofOfWorkImpl(uint256 hash, unsigned int nBits, const Consensus::Params& params) +{ + auto bnTarget{DeriveTarget(nBits, params.powLimit)}; + if (!bnTarget) return false; // Check proof of work matches claimed amount if (UintToArith256(hash) > bnTarget) diff --git a/src/pow.h b/src/pow.h index 2b28ade273..ceba55d36a 100644 --- a/src/pow.h +++ b/src/pow.h @@ -13,6 +13,18 @@ class CBlockHeader; class CBlockIndex; class uint256; +class arith_uint256; + +/** + * Convert nBits value to target. + * + * @param[in] nBits compact representation of the target + * @param[in] pow_limit PoW limit (consensus parameter) + * + * @return the proof-of-work target or nullopt if the nBits value + * is invalid (due to overflow or exceeding pow_limit) + */ +std::optional DeriveTarget(unsigned int nBits, const uint256 pow_limit); unsigned int GetNextWorkRequired(const CBlockIndex* pindexLast, const CBlockHeader *pblock, const Consensus::Params&); unsigned int CalculateNextWorkRequired(const CBlockIndex* pindexLast, int64_t nFirstBlockTime, const Consensus::Params&); From ba7b9f3d7bf5a1ad395262b080e832f5c9958e4d Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Mon, 30 Dec 2024 17:56:54 +0100 Subject: [PATCH 46/62] build: move pow and chain to bitcoin_common The next commit needs pow.cpp in rpc/util.cpp. --- src/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 889c00c783..89fdd855a4 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -109,6 +109,7 @@ add_library(bitcoin_common STATIC EXCLUDE_FROM_ALL addresstype.cpp base58.cpp bech32.cpp + chain.cpp chainparams.cpp chainparamsbase.cpp coins.cpp @@ -142,6 +143,7 @@ add_library(bitcoin_common STATIC EXCLUDE_FROM_ALL outputtype.cpp policy/feerate.cpp policy/policy.cpp + pow.cpp protocol.cpp psbt.cpp rpc/external_signer.cpp @@ -200,7 +202,6 @@ add_library(bitcoin_node STATIC EXCLUDE_FROM_ALL bip324.cpp blockencodings.cpp blockfilter.cpp - chain.cpp consensus/tx_verify.cpp dbwrapper.cpp deploymentstatus.cpp @@ -262,7 +263,6 @@ add_library(bitcoin_node STATIC EXCLUDE_FROM_ALL policy/rbf.cpp policy/settings.cpp policy/truc_policy.cpp - pow.cpp rest.cpp rpc/blockchain.cpp rpc/fees.cpp From 7ddbed4f9fc0c90bfed244a71194740a4a1fa1be Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Wed, 8 Jan 2025 13:13:08 +0100 Subject: [PATCH 47/62] rpc: add nBits to getmininginfo Also expands nBits test coverage. 
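
For a quick sanity check of the regtest constants exercised in the tests below (assuming the usual convention that difficulty is the ratio of the difficulty-1 target encoded by `0x1d00ffff` to the current target), the numbers can be reproduced with plain doubles:

#include <cmath>
#include <cstdio>

int main()
{
    // Regtest nBits 0x207fffff => target = 0x7fffff * 256^(0x20 - 3).
    const double regtest_target{0x7fffff * std::pow(256.0, 0x20 - 3)};
    // Difficulty-1 nBits 0x1d00ffff => target = 0xffff * 256^(0x1d - 3).
    const double diff1_target{0xffff * std::pow(256.0, 0x1d - 3)};
    // Prints approximately 4.656542373906925e-10, the value asserted in mining_basic.py.
    std::printf("%.15e\n", diff1_target / regtest_target);
    return 0;
}

The `bits` string added to `getmininginfo` below is simply `%08x` of the same compact value, e.g. `207fffff` on regtest.
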
--- src/rpc/blockchain.cpp | 4 ++-- src/rpc/mining.cpp | 5 ++++- test/functional/mining_basic.py | 3 +++ test/functional/rpc_blockchain.py | 6 ++++-- test/functional/test_framework/blocktools.py | 6 +++++- 5 files changed, 18 insertions(+), 6 deletions(-) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 823d2303c8..3caf4bf4eb 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -553,7 +553,7 @@ static RPCHelpMan getblockheader() {RPCResult::Type::NUM_TIME, "time", "The block time expressed in " + UNIX_EPOCH_TIME}, {RPCResult::Type::NUM_TIME, "mediantime", "The median block time expressed in " + UNIX_EPOCH_TIME}, {RPCResult::Type::NUM, "nonce", "The nonce"}, - {RPCResult::Type::STR_HEX, "bits", "The bits"}, + {RPCResult::Type::STR_HEX, "bits", "nBits: compact representation of the block difficulty target"}, {RPCResult::Type::NUM, "difficulty", "The difficulty"}, {RPCResult::Type::STR_HEX, "chainwork", "Expected number of hashes required to produce the current chain"}, {RPCResult::Type::NUM, "nTx", "The number of transactions in the block"}, @@ -727,7 +727,7 @@ static RPCHelpMan getblock() {RPCResult::Type::NUM_TIME, "time", "The block time expressed in " + UNIX_EPOCH_TIME}, {RPCResult::Type::NUM_TIME, "mediantime", "The median block time expressed in " + UNIX_EPOCH_TIME}, {RPCResult::Type::NUM, "nonce", "The nonce"}, - {RPCResult::Type::STR_HEX, "bits", "The bits"}, + {RPCResult::Type::STR_HEX, "bits", "nBits: compact representation of the block difficulty target"}, {RPCResult::Type::NUM, "difficulty", "The difficulty"}, {RPCResult::Type::STR_HEX, "chainwork", "Expected number of hashes required to produce the chain up to this block (in hex)"}, {RPCResult::Type::NUM, "nTx", "The number of transactions in the block"}, diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 7e5936fddf..c8bd8d3fd2 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -421,6 +421,7 @@ static RPCHelpMan getmininginfo() {RPCResult::Type::NUM, "blocks", "The current block"}, {RPCResult::Type::NUM, "currentblockweight", /*optional=*/true, "The block weight of the last assembled block (only present if a block was ever assembled)"}, {RPCResult::Type::NUM, "currentblocktx", /*optional=*/true, "The number of block transactions of the last assembled block (only present if a block was ever assembled)"}, + {RPCResult::Type::STR_HEX, "bits", "The current nBits, compact representation of the block difficulty target"}, {RPCResult::Type::NUM, "difficulty", "The current difficulty"}, {RPCResult::Type::NUM, "networkhashps", "The network hashes per second"}, {RPCResult::Type::NUM, "pooledtx", "The size of the mempool"}, @@ -446,12 +447,14 @@ static RPCHelpMan getmininginfo() ChainstateManager& chainman = EnsureChainman(node); LOCK(cs_main); const CChain& active_chain = chainman.ActiveChain(); + CBlockIndex& tip{*CHECK_NONFATAL(active_chain.Tip())}; UniValue obj(UniValue::VOBJ); obj.pushKV("blocks", active_chain.Height()); if (BlockAssembler::m_last_block_weight) obj.pushKV("currentblockweight", *BlockAssembler::m_last_block_weight); if (BlockAssembler::m_last_block_num_txs) obj.pushKV("currentblocktx", *BlockAssembler::m_last_block_num_txs); - obj.pushKV("difficulty", GetDifficulty(*CHECK_NONFATAL(active_chain.Tip()))); + obj.pushKV("bits", strprintf("%08x", tip.nBits)); + obj.pushKV("difficulty", GetDifficulty(tip)); obj.pushKV("networkhashps", getnetworkhashps().HandleRequest(request)); obj.pushKV("pooledtx", (uint64_t)mempool.size()); obj.pushKV("chain", 
chainman.GetParams().GetChainTypeString()); diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py index d367ec122d..d30d8533a5 100755 --- a/test/functional/mining_basic.py +++ b/test/functional/mining_basic.py @@ -16,6 +16,8 @@ get_witness_script, NORMAL_GBT_REQUEST_PARAMS, TIME_GENESIS_BLOCK, + REGTEST_N_BITS, + nbits_str, ) from test_framework.messages import ( BLOCK_HEADER_SIZE, @@ -206,6 +208,7 @@ def assert_submitblock(block, result_str_1, result_str_2=None): assert_equal(mining_info['chain'], self.chain) assert 'currentblocktx' not in mining_info assert 'currentblockweight' not in mining_info + assert_equal(mining_info['bits'], nbits_str(REGTEST_N_BITS)) assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10')) assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334')) assert_equal(mining_info['pooledtx'], 0) diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index f02e6914ef..ae95beae2c 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# Copyright (c) 2014-2022 The Bitcoin Core developers +# Copyright (c) 2014-present The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test RPCs related to blockchainstate. @@ -30,9 +30,11 @@ from test_framework.blocktools import ( MAX_FUTURE_BLOCK_TIME, TIME_GENESIS_BLOCK, + REGTEST_N_BITS, create_block, create_coinbase, create_tx_with_script, + nbits_str, ) from test_framework.messages import ( CBlockHeader, @@ -412,7 +414,7 @@ def _test_getblockheader(self): assert_is_hash_string(header['hash']) assert_is_hash_string(header['previousblockhash']) assert_is_hash_string(header['merkleroot']) - assert_is_hash_string(header['bits'], length=None) + assert_equal(header['bits'], nbits_str(REGTEST_N_BITS)) assert isinstance(header['time'], int) assert_equal(header['mediantime'], TIME_RANGE_MTP) assert isinstance(header['nonce'], int) diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index 705b8e8fe5..c92b1398c3 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -65,6 +65,10 @@ VERSIONBITS_LAST_OLD_BLOCK_VERSION = 4 MIN_BLOCKS_TO_KEEP = 288 +REGTEST_N_BITS = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams" + +def nbits_str(nbits): + return f"{nbits:08x}" def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None): """Create a block (with regtest difficulty).""" @@ -77,7 +81,7 @@ def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl if tmpl and tmpl.get('bits') is not None: block.nBits = struct.unpack('>I', bytes.fromhex(tmpl['bits']))[0] else: - block.nBits = 0x207fffff # difficulty retargeting is disabled in REGTEST chainparams + block.nBits = REGTEST_N_BITS if coinbase is None: coinbase = create_coinbase(height=tmpl['height']) block.vtx.append(coinbase) From d20d96fa41ce706ccc480b4f3143438ce0720348 Mon Sep 17 00:00:00 2001 From: tdb3 <106488469+tdb3@users.noreply.github.com> Date: Wed, 1 Jan 2025 19:50:03 -0500 Subject: [PATCH 48/62] test: use REGTEST_N_BITS in feature_block --- test/functional/feature_block.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 
384ca311c7..2dfa568c5b 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -12,6 +12,7 @@ create_tx_with_script, get_legacy_sigopcount_block, MAX_BLOCK_SIGOPS, + REGTEST_N_BITS, ) from test_framework.messages import ( CBlock, @@ -590,7 +591,7 @@ def run_test(self): b44 = CBlock() b44.nTime = self.tip.nTime + 1 b44.hashPrevBlock = self.tip.sha256 - b44.nBits = 0x207fffff + b44.nBits = REGTEST_N_BITS b44.vtx.append(coinbase) tx = self.create_and_sign_transaction(out[14], 1) b44.vtx.append(tx) @@ -606,7 +607,7 @@ def run_test(self): b45 = CBlock() b45.nTime = self.tip.nTime + 1 b45.hashPrevBlock = self.tip.sha256 - b45.nBits = 0x207fffff + b45.nBits = REGTEST_N_BITS b45.vtx.append(non_coinbase) b45.hashMerkleRoot = b45.calc_merkle_root() b45.solve() @@ -620,7 +621,7 @@ def run_test(self): b46 = CBlock() b46.nTime = b44.nTime + 1 b46.hashPrevBlock = b44.sha256 - b46.nBits = 0x207fffff + b46.nBits = REGTEST_N_BITS b46.vtx = [] b46.hashMerkleRoot = 0 b46.solve() From 341f93251677fee66c822f414b75499e8b3b31f6 Mon Sep 17 00:00:00 2001 From: Sjors Provoost Date: Wed, 8 Jan 2025 13:03:08 +0100 Subject: [PATCH 49/62] rpc: add GetTarget helper --- src/rpc/util.cpp | 8 ++++++++ src/rpc/util.h | 10 ++++++++++ 2 files changed, 18 insertions(+) diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index b1fbc25641..2941dda8c0 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -4,6 +4,7 @@ #include // IWYU pragma: keep +#include #include #include #include @@ -13,6 +14,7 @@ #include #include #include +#include #include #include