diff --git a/docs/integration.md b/docs/integration.md
index 8140017fb3..cd4dbdd114 100644
--- a/docs/integration.md
+++ b/docs/integration.md
@@ -19,7 +19,7 @@ stellar-core generates several types of data that can be used by applications, d
 
 Full [Ledger](ledger.md) snapshots are available in both:
   * [history archives](history.md) (checkpoints, every 64 ledgers, updated every 5 minutes)
-  * in the case of captive-core (enabled via the `--in-memory` command line option) the ledger is maintained within the stellar-core process and ledger-state need to be tracked as it changes via "meta" updates.
+* in the case of captive-core the ledger is maintained within the stellar-core process and ledger state needs to be tracked as it changes via "meta" updates.
 
 ## Ledger State transition information (transactions, etc)
 
diff --git a/docs/quick-reference.md b/docs/quick-reference.md
index 24c76a6db5..56d4af2d9c 100644
--- a/docs/quick-reference.md
+++ b/docs/quick-reference.md
@@ -147,9 +147,8 @@ transactions or ledger states) must be downloaded and verified sequentially. It
 worthwhile to save and reuse such a trusted reference file multiple times before
 regenerating it.
 
 ##### Experimental fast "meta data generation"
-`catchup` has a command line flag `--in-memory` that when combined with the
-`METADATA_OUTPUT_STREAM` allows a stellar-core instance to stream meta data instead
-of using a database as intermediate store.
+`catchup`, when combined with the
+`METADATA_OUTPUT_STREAM` option, allows a stellar-core instance to stream meta data.
 
 This has been tested as being orders of magnitude faster for replaying large sections
 of history.
@@ -157,17 +156,7 @@ of history.
 
 If you don't specify any value for stream the command will just replay
 transactions in memory and throw away all meta. This can be useful for
 performance testing the transaction processing subsystem.
 
-The `--in-memory` flag is also supported by the `run` command, which can be used to
-run a lightweight, stateless validator or watcher node, and this can be combined with
-`METADATA_OUTPUT_STREAM` to stream network activity to another process.
-
-By default, such a stateless node in `run` mode will catch up to the network starting from the
-network's most recent checkpoint, but this behaviour can be further modified using two flags
-(that must be used together) called `--start-at-ledger <N>` and `--start-at-hash <HEXHASH>`. These
-cause the node to start with a fast in-memory catchup to ledger `N` with hash `HEXHASH`, and then
-replay ledgers forward to the current state of the network.
-
-A stateless and meta-streaming node can additionally be configured with
+A meta-streaming node can additionally be configured with
 `EXPERIMENTAL_PRECAUTION_DELAY_META=true` (if unspecified, the default is
 `false`). If `EXPERIMENTAL_PRECAUTION_DELAY_META` is `true`, then the node
 will delay emitting meta for a ledger `<N>` until the _next_ ledger, `<N+1>`,
 closes.
diff --git a/docs/software/commands.md b/docs/software/commands.md
index ac51fe10f0..cc06917f25 100644
--- a/docs/software/commands.md
+++ b/docs/software/commands.md
@@ -159,13 +159,7 @@ apply.
   checkpoint from a history archive.
 * **run**: Runs stellar-core service.
Option **--wait-for-consensus** lets validators wait to hear from the network - before participating in consensus.
- (deprecated) Option **--in-memory** stores the current ledger in memory rather than a - database.
- (deprecated) Option **--start-at-ledger <N>** starts **--in-memory** mode with a catchup to
- ledger **N** then replays to the current state of the network.
- (deprecated) Option **--start-at-hash <HEXHASH>** provides a (mandatory) hash for the ledger
- **N** specified by the **--start-at-ledger** option.
+ before participating in consensus.
 * **sec-to-pub**: Reads a secret key on standard input and outputs the corresponding
 public key. Both keys are in Stellar's standard base-32 ASCII format.
diff --git a/docs/stellar-core_example.cfg b/docs/stellar-core_example.cfg
index 103c115cf4..873a7b955a 100644
--- a/docs/stellar-core_example.cfg
+++ b/docs/stellar-core_example.cfg
@@ -229,14 +229,6 @@ FLOOD_DEMAND_BACKOFF_DELAY_MS = 500
 # against each other.
 MAX_DEX_TX_OPERATIONS_IN_TX_SET = 0
 
-# DEPRECATED_SQL_LEDGER_STATE (bool) default false
-# When set to true, SQL is used to store all ledger state instead of
-# BucketListDB. This is not recommended and may cause performance degregradation.
-# This is deprecated and will be removed in the future. Note that offers table
-# is still maintained in SQL when this is set to false, but all other ledger
-# state tables are dropped.
-DEPRECATED_SQL_LEDGER_STATE = false
-
 # BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT (Integer) default 14
 # Determines page size used by BucketListDB for range indexes, where
 # pageSize == 2^BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT. If set to
@@ -258,11 +250,6 @@ BUCKETLIST_DB_INDEX_CUTOFF = 20
 # this value is ingnored and indexes are never persisted.
 BUCKETLIST_DB_PERSIST_INDEX = true
 
-# BACKGROUND_EVICTION_SCAN (bool) default true
-# Determines whether eviction scans occur in the background thread. Requires
-# that DEPRECATED_SQL_LEDGER_STATE is set to false.
-BACKGROUND_EVICTION_SCAN = true
-
 # EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING (bool) default false
 # Determines whether some of overlay processing occurs in the background
 # thread.
@@ -601,17 +588,12 @@ MAX_SLOTS_TO_REMEMBER=12
 # only a passive "watcher" node.
 METADATA_OUTPUT_STREAM=""
 
-# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true causes a stateless node
+# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true causes a node
 # which is streaming meta to delay streaming the meta for a given ledger until
 # it closes the next ledger. This ensures that if a local bug had corrupted the
 # given ledger, then the meta for the corrupted ledger will never be emitted, as
 # the node will not be able to reach consensus with the network on the next
 # ledger.
-#
-# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true in combination with a
-# non-empty METADATA_OUTPUT_STREAM (which can be configured on the command line
-# as well as in the config file) requires an in-memory database (specified by
-# using --in-memory on the command line).
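For context on consuming the stream configured here: stellar-core writes `METADATA_OUTPUT_STREAM` as consecutive framed XDR `LedgerCloseMeta` records, so a minimal file-backed reader can be sketched as follows. This is illustrative only; `processLedger` is a hypothetical handler, not part of this patch.

```c++
#include "util/XDRStream.h"     // XDRInputFileStream
#include "xdr/Stellar-ledger.h" // LedgerCloseMeta
#include <string>

// Hypothetical per-ledger handler supplied by the consumer.
void processLedger(stellar::LedgerCloseMeta const& lcm);

void
readMetaStream(std::string const& path)
{
    stellar::XDRInputFileStream in;
    in.open(path);
    stellar::LedgerCloseMeta lcm;
    // readOne returns false at end-of-stream.
    while (in.readOne(lcm))
    {
        // With EXPERIMENTAL_PRECAUTION_DELAY_META=true (the setting below),
        // the record for ledger N only appears here after ledger N+1 closes.
        processLedger(lcm);
    }
}
```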
EXPERIMENTAL_PRECAUTION_DELAY_META=false # Number of ledgers worth of transaction metadata to preserve on disk for diff --git a/docs/stellar-core_example_validators.cfg b/docs/stellar-core_example_validators.cfg index 10d6ced3ee..a1203e1047 100644 --- a/docs/stellar-core_example_validators.cfg +++ b/docs/stellar-core_example_validators.cfg @@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false NETWORK_PASSPHRASE="Example configuration" DATABASE="sqlite3://example.db" -DEPRECATED_SQL_LEDGER_STATE = false NODE_SEED="SA7FGJMMUIHNE3ZPI2UO5I632A7O5FBAZTXFAIEVFA4DSSGLHXACLAIT a3" NODE_HOME_DOMAIN="domainA" diff --git a/docs/stellar-core_standalone.cfg b/docs/stellar-core_standalone.cfg index b9fd80a509..858e97d002 100644 --- a/docs/stellar-core_standalone.cfg +++ b/docs/stellar-core_standalone.cfg @@ -12,7 +12,6 @@ NODE_IS_VALIDATOR=true #DATABASE="postgresql://dbname=stellar user=postgres password=password host=localhost" DATABASE="sqlite3://stellar.db" -DEPRECATED_SQL_LEDGER_STATE = false COMMANDS=["ll?level=debug"] diff --git a/docs/stellar-core_testnet.cfg b/docs/stellar-core_testnet.cfg index 77c834eb62..981105b7a6 100644 --- a/docs/stellar-core_testnet.cfg +++ b/docs/stellar-core_testnet.cfg @@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false NETWORK_PASSPHRASE="Test SDF Network ; September 2015" DATABASE="sqlite3://stellar.db" -DEPRECATED_SQL_LEDGER_STATE = false # Stellar Testnet validators [[HOME_DOMAINS]] diff --git a/docs/stellar-core_testnet_legacy.cfg b/docs/stellar-core_testnet_legacy.cfg index 946e7c8bc9..0ff9909c9f 100644 --- a/docs/stellar-core_testnet_legacy.cfg +++ b/docs/stellar-core_testnet_legacy.cfg @@ -9,7 +9,6 @@ KNOWN_PEERS=[ "core-testnet3.stellar.org"] DATABASE="sqlite3://stellar.db" -DEPRECATED_SQL_LEDGER_STATE = false UNSAFE_QUORUM=true FAILURE_SAFETY=1 diff --git a/docs/stellar-core_testnet_validator.cfg b/docs/stellar-core_testnet_validator.cfg index c5d5768e87..fa329c0c43 100644 --- a/docs/stellar-core_testnet_validator.cfg +++ b/docs/stellar-core_testnet_validator.cfg @@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false NETWORK_PASSPHRASE="Test SDF Network ; September 2015" DATABASE="sqlite3://stellar.db" -DEPRECATED_SQL_LEDGER_STATE = false # Configuring the node as a validator # note that this is an unsafe configuration in this particular setup: diff --git a/src/bucket/Bucket.cpp b/src/bucket/Bucket.cpp index 61e96b3a28..9e41903728 100644 --- a/src/bucket/Bucket.cpp +++ b/src/bucket/Bucket.cpp @@ -8,6 +8,7 @@ #include "util/asio.h" // IWYU pragma: keep #include "bucket/Bucket.h" #include "bucket/BucketApplicator.h" +#include "bucket/BucketInputIterator.h" #include "bucket/BucketList.h" #include "bucket/BucketListSnapshot.h" #include "bucket/BucketManager.h" @@ -24,11 +25,13 @@ #include "util/Fs.h" #include "util/GlobalChecks.h" #include "util/Logging.h" +#include "util/ProtocolVersion.h" #include "util/XDRStream.h" #include "util/types.h" #include #include "medida/counter.h" +#include "xdr/Stellar-ledger.h" namespace stellar { @@ -97,10 +100,10 @@ Bucket::getSize() const } bool -Bucket::containsBucketIdentity(BucketEntry const& id) const +LiveBucket::containsBucketIdentity(BucketEntry const& id) const { - BucketEntryIdCmp cmp; - BucketInputIterator iter(shared_from_this()); + BucketEntryIdCmp cmp; + LiveBucketInputIterator iter(shared_from_this()); while (iter) { if (!(cmp(*iter, id) || cmp(id, *iter))) @@ -132,19 +135,10 @@ Bucket::freeIndex() #ifdef BUILD_TESTS void -Bucket::apply(Application& app) const +LiveBucket::apply(Application& app) const { ZoneScoped; - auto filter = [&](LedgerEntryType t) { 
- if (app.getConfig().isUsingBucketListDB()) - { - return t == OFFER; - } - - return true; - }; - std::unordered_set emptySet; BucketApplicator applicator( app, app.getConfig().LEDGER_PROTOCOL_VERSION, @@ -152,7 +146,7 @@ Bucket::apply(Application& app) const 0 /*set to a level that's not the bottom so we don't treat live entries as init*/ , - shared_from_this(), filter, emptySet); + shared_from_this(), emptySet); BucketApplicator::Counters counters(app.getClock().now()); while (applicator) { @@ -163,10 +157,10 @@ Bucket::apply(Application& app) const #endif // BUILD_TESTS std::vector -Bucket::convertToBucketEntry(bool useInit, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) +LiveBucket::convertToBucketEntry(bool useInit, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) { std::vector bucket; for (auto const& e : initEntries) @@ -191,7 +185,7 @@ Bucket::convertToBucketEntry(bool useInit, bucket.push_back(ce); } - BucketEntryIdCmp cmp; + BucketEntryIdCmp cmp; std::sort(bucket.begin(), bucket.end(), cmp); releaseAssert(std::adjacent_find( bucket.begin(), bucket.end(), @@ -229,12 +223,83 @@ Bucket::randomBucketIndexName(std::string const& tmpDir) return randomFileName(tmpDir, ".index"); } -std::shared_ptr -Bucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries, bool countMergeEvents, - asio::io_context& ctx, bool doFsync) +std::vector +HotArchiveBucket::convertToBucketEntry( + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) +{ + std::vector bucket; + for (auto const& e : archivedEntries) + { + HotArchiveBucketEntry be; + be.type(HOT_ARCHIVE_ARCHIVED); + be.archivedEntry() = e; + bucket.push_back(be); + } + for (auto const& k : restoredEntries) + { + HotArchiveBucketEntry be; + be.type(HOT_ARCHIVE_LIVE); + be.key() = k; + bucket.push_back(be); + } + for (auto const& k : deletedEntries) + { + HotArchiveBucketEntry be; + be.type(HOT_ARCHIVE_DELETED); + be.key() = k; + bucket.push_back(be); + } + + BucketEntryIdCmp cmp; + std::sort(bucket.begin(), bucket.end(), cmp); + releaseAssert(std::adjacent_find(bucket.begin(), bucket.end(), + [&cmp](HotArchiveBucketEntry const& lhs, + HotArchiveBucketEntry const& rhs) { + return !cmp(lhs, rhs); + }) == bucket.end()); + return bucket; +} + +std::shared_ptr +HotArchiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries, + bool countMergeEvents, asio::io_context& ctx, + bool doFsync) +{ + ZoneScoped; + BucketMetadata meta; + meta.ledgerVersion = protocolVersion; + meta.ext.v(1); + meta.ext.bucketListType() = BucketListType::HOT_ARCHIVE; + auto entries = + convertToBucketEntry(archivedEntries, restoredEntries, deletedEntries); + + MergeCounters mc; + HotArchiveBucketOutputIterator out(bucketManager.getTmpDir(), true, meta, + mc, ctx, doFsync); + for (auto const& e : entries) + { + out.put(e); + } + + if (countMergeEvents) + { + bucketManager.incrMergeCounters(mc); + } + + return out.getBucket(bucketManager); +} + +std::shared_ptr +LiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries, + bool countMergeEvents, asio::io_context& ctx, 
bool doFsync) { ZoneScoped; // When building fresh buckets after protocol version 10 (i.e. version @@ -245,12 +310,21 @@ Bucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, BucketMetadata meta; meta.ledgerVersion = protocolVersion; + + if (protocolVersionStartsFrom( + protocolVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + meta.ext.v(1); + meta.ext.bucketListType() = BucketListType::LIVE; + } + auto entries = convertToBucketEntry(useInit, initEntries, liveEntries, deadEntries); MergeCounters mc; - BucketOutputIterator out(bucketManager.getTmpDir(), true, meta, mc, ctx, - doFsync); + LiveBucketOutputIterator out(bucketManager.getTmpDir(), true, meta, mc, ctx, + doFsync); for (auto const& e : entries) { out.put(e); @@ -261,8 +335,7 @@ Bucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion, bucketManager.incrMergeCounters(mc); } - return out.getBucket(bucketManager, - bucketManager.getConfig().isUsingBucketListDB()); + return out.getBucket(bucketManager); } static void @@ -286,8 +359,8 @@ countShadowedEntryType(MergeCounters& mc, BucketEntry const& e) } void -Bucket::checkProtocolLegality(BucketEntry const& entry, - uint32_t protocolVersion) +LiveBucket::checkProtocolLegality(BucketEntry const& entry, + uint32_t protocolVersion) { if (protocolVersionIsBefore( protocolVersion, @@ -301,8 +374,19 @@ Bucket::checkProtocolLegality(BucketEntry const& entry, } inline void -maybePut(BucketOutputIterator& out, BucketEntry const& entry, - std::vector& shadowIterators, +maybePut(HotArchiveBucketOutputIterator& out, + HotArchiveBucketEntry const& entry, + std::vector& shadowIterators, + bool keepShadowedLifecycleEntries, MergeCounters& mc) +{ + // Archived BucketList is only present after protocol 21, so shadows are + // never supported + out.put(entry); +} + +inline void +maybePut(LiveBucketOutputIterator& out, BucketEntry const& entry, + std::vector& shadowIterators, bool keepShadowedLifecycleEntries, MergeCounters& mc) { // In ledgers before protocol 11, keepShadowedLifecycleEntries will be @@ -340,8 +424,8 @@ maybePut(BucketOutputIterator& out, BucketEntry const& entry, // Note that this decision only controls whether to elide dead entries due // to _shadows_. There is a secondary elision of dead entries at the _oldest // level_ of the bucketlist that is accomplished through filtering at the - // BucketOutputIterator level, and happens independent of ledger protocol - // version. + // LiveBucketOutputIterator level, and happens independent of ledger + // protocol version. if (keepShadowedLifecycleEntries && (entry.type() == INITENTRY || entry.type() == DEADENTRY)) @@ -351,7 +435,7 @@ maybePut(BucketOutputIterator& out, BucketEntry const& entry, return; } - BucketEntryIdCmp cmp; + BucketEntryIdCmp cmp; for (auto& si : shadowIterators) { // Advance the shadowIterator while it's less than the candidate @@ -447,11 +531,13 @@ countNewEntryType(MergeCounters& mc, BucketEntry const& e) // and shadowing protocol simultaneously, the moment the first new-protocol // bucket enters the youngest level. At least one new bucket is in every merge's // shadows from then on in, so they all upgrade (and preserve lifecycle events). 
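The version negotiation that the function below implements can be summarized in a few lines. This is a simplified sketch of the rule only; the real function additionally updates `MergeCounters` and derives `keepShadowedLifecycleEntries`:

```c++
#include "bucket/Bucket.h"         // LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED
#include "util/ProtocolVersion.h"  // protocolVersionIsBefore
#include <algorithm>
#include <cstdint>
#include <stdexcept>
#include <vector>

// Simplified: a merge runs at the max version of its inputs, and shadows only
// contribute while the network predates shadow removal. Throws if the result
// exceeds the caller-supplied bound (see Bucket::merge's contract).
uint32_t
negotiatedVersion(uint32_t oldVersion, uint32_t newVersion,
                  std::vector<uint32_t> const& shadowVersions,
                  uint32_t maxProtocolVersion)
{
    uint32_t v = std::max(oldVersion, newVersion);
    for (auto sv : shadowVersions)
    {
        if (stellar::protocolVersionIsBefore(
                sv, stellar::LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
        {
            v = std::max(v, sv);
        }
    }
    if (v > maxProtocolVersion)
    {
        throw std::runtime_error("bucket protocol version exceeds maximum");
    }
    return v;
}
```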
+template static void calculateMergeProtocolVersion( MergeCounters& mc, uint32_t maxProtocolVersion, - BucketInputIterator const& oi, BucketInputIterator const& ni, - std::vector const& shadowIterators, + BucketInputIterator const& oi, + BucketInputIterator const& ni, + std::vector> const& shadowIterators, uint32& protocolVersion, bool& keepShadowedLifecycleEntries) { protocolVersion = std::max(oi.getMetadata().ledgerVersion, @@ -466,7 +552,7 @@ calculateMergeProtocolVersion( { auto version = si.getMetadata().ledgerVersion; if (protocolVersionIsBefore(version, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { protocolVersion = std::max(version, protocolVersion); } @@ -488,9 +574,16 @@ calculateMergeProtocolVersion( // support annihilation of INITENTRY and DEADENTRY pairs. See commentary // above in `maybePut`. keepShadowedLifecycleEntries = true; + + // Don't count shadow metrics for Hot Archive BucketList + if constexpr (std::is_same_v) + { + return; + } + if (protocolVersionIsBefore( protocolVersion, - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) { ++mc.mPreInitEntryProtocolMerges; keepShadowedLifecycleEntries = false; @@ -501,7 +594,7 @@ calculateMergeProtocolVersion( } if (protocolVersionIsBefore(protocolVersion, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { ++mc.mPreShadowRemovalProtocolMerges; } @@ -519,13 +612,18 @@ calculateMergeProtocolVersion( // side, or entries that compare non-equal. In all these cases we just // take the lesser (or existing) entry and advance only one iterator, // not scrutinizing the entry type further. +template static bool mergeCasesWithDefaultAcceptance( - BucketEntryIdCmp const& cmp, MergeCounters& mc, BucketInputIterator& oi, - BucketInputIterator& ni, BucketOutputIterator& out, - std::vector& shadowIterators, uint32_t protocolVersion, - bool keepShadowedLifecycleEntries) + BucketEntryIdCmp const& cmp, MergeCounters& mc, + BucketInputIterator& oi, BucketInputIterator& ni, + BucketOutputIterator& out, + std::vector>& shadowIterators, + uint32_t protocolVersion, bool keepShadowedLifecycleEntries) { + static_assert(std::is_same_v || + std::is_same_v); + if (!ni || (oi && ni && cmp(*oi, *ni))) { // Either of: @@ -535,8 +633,11 @@ mergeCasesWithDefaultAcceptance( // // In both cases: take old entry. ++mc.mOldEntriesDefaultAccepted; - Bucket::checkProtocolLegality(*oi, protocolVersion); - countOldEntryType(mc, *oi); + if constexpr (std::is_same_v) + { + LiveBucket::checkProtocolLegality(*oi, protocolVersion); + countOldEntryType(mc, *oi); + } maybePut(out, *oi, shadowIterators, keepShadowedLifecycleEntries, mc); ++oi; return true; @@ -550,8 +651,11 @@ mergeCasesWithDefaultAcceptance( // // In both cases: take new entry. ++mc.mNewEntriesDefaultAccepted; - Bucket::checkProtocolLegality(*ni, protocolVersion); - countNewEntryType(mc, *ni); + if constexpr (std::is_same_v) + { + LiveBucket::checkProtocolLegality(*ni, protocolVersion); + countNewEntryType(mc, *ni); + } maybePut(out, *ni, shadowIterators, keepShadowedLifecycleEntries, mc); ++ni; return true; @@ -562,9 +666,33 @@ mergeCasesWithDefaultAcceptance( // The remaining cases happen when keys are equal and we have to reason // through the relationships of their bucket lifecycle states. Trickier. 
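Throughout these merge helpers, "equal keys" means neither entry orders before the other under `BucketEntryIdCmp`; the same idiom appears in `containsBucketIdentity` earlier in this file. As a standalone sketch:

```c++
// Identity-equality derived from the strict-weak ordering BucketEntryIdCmp
// (sketch; BucketT is LiveBucket or HotArchiveBucket and EntryT is the
// matching entry type).
template <typename BucketT, typename EntryT>
bool
sameIdentity(EntryT const& a, EntryT const& b)
{
    stellar::BucketEntryIdCmp<BucketT> cmp;
    return !(cmp(a, b) || cmp(b, a));
}
```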
static void -mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi, - BucketInputIterator& ni, BucketOutputIterator& out, - std::vector& shadowIterators, +mergeCasesWithEqualKeys( + MergeCounters& mc, HotArchiveBucketInputIterator& oi, + HotArchiveBucketInputIterator& ni, HotArchiveBucketOutputIterator& out, + std::vector& shadowIterators, + uint32_t protocolVersion, bool keepShadowedLifecycleEntries) +{ + // If two identical keys have the same type, throw an error. Otherwise, + // take the newer key. + HotArchiveBucketEntry const& oldEntry = *oi; + HotArchiveBucketEntry const& newEntry = *ni; + if (oldEntry.type() == newEntry.type()) + { + throw std::runtime_error( + "Malformed Hot Archive bucket: two identical keys with " + "the same type."); + } + + out.put(newEntry); + ++ni; + ++oi; +} + +static void +mergeCasesWithEqualKeys(MergeCounters& mc, LiveBucketInputIterator& oi, + LiveBucketInputIterator& ni, + LiveBucketOutputIterator& out, + std::vector& shadowIterators, uint32_t protocolVersion, bool keepShadowedLifecycleEntries) { @@ -633,8 +761,8 @@ mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi, BucketEntry const& oldEntry = *oi; BucketEntry const& newEntry = *ni; - Bucket::checkProtocolLegality(oldEntry, protocolVersion); - Bucket::checkProtocolLegality(newEntry, protocolVersion); + LiveBucket::checkProtocolLegality(oldEntry, protocolVersion); + LiveBucket::checkProtocolLegality(newEntry, protocolVersion); countOldEntryType(mc, oldEntry); countNewEntryType(mc, newEntry); @@ -684,107 +812,18 @@ mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi, ++ni; } -bool -Bucket::scanForEvictionLegacy(AbstractLedgerTxn& ltx, EvictionIterator& iter, - uint32_t& bytesToScan, - uint32_t& remainingEntriesToEvict, - uint32_t ledgerSeq, - medida::Counter& entriesEvictedCounter, - medida::Counter& bytesScannedForEvictionCounter, - std::shared_ptr stats) const -{ - ZoneScoped; - releaseAssert(stats); - - if (isEmpty() || - protocolVersionIsBefore(getBucketVersion(shared_from_this()), - SOROBAN_PROTOCOL_VERSION)) - { - // EOF, skip to next bucket - return false; - } - - if (remainingEntriesToEvict == 0 || bytesToScan == 0) - { - // Reached end of scan region - return true; - } - - XDRInputFileStream stream{}; - stream.open(mFilename.string()); - stream.seek(iter.bucketFileOffset); - - BucketEntry be; - while (stream.readOne(be)) - { - if (be.type() == INITENTRY || be.type() == LIVEENTRY) - { - auto const& le = be.liveEntry(); - if (isTemporaryEntry(le.data)) - { - ZoneNamedN(maybeEvict, "maybe evict entry", true); - - auto ttlKey = getTTLKey(le); - uint32_t liveUntilLedger = 0; - auto shouldEvict = [&] { - auto ttlLtxe = ltx.loadWithoutRecord(ttlKey); - if (!ttlLtxe) - { - // Entry was already deleted either manually or by an - // earlier eviction scan, do nothing - return false; - } - - releaseAssert(ttlLtxe); - liveUntilLedger = - ttlLtxe.current().data.ttl().liveUntilLedgerSeq; - return !isLive(ttlLtxe.current(), ledgerSeq); - }; - - if (shouldEvict()) - { - ZoneNamedN(evict, "evict entry", true); - auto age = ledgerSeq - liveUntilLedger; - stats->recordEvictedEntry(age); - - ltx.erase(ttlKey); - ltx.erase(LedgerEntryKey(le)); - entriesEvictedCounter.inc(); - --remainingEntriesToEvict; - } - } - } - - auto newPos = stream.pos(); - auto bytesRead = newPos - iter.bucketFileOffset; - iter.bucketFileOffset = newPos; - bytesScannedForEvictionCounter.inc(bytesRead); - if (bytesRead >= bytesToScan) - { - // Reached end of scan region - bytesToScan = 0; - return 
true; - } - else if (remainingEntriesToEvict == 0) - { - return true; - } - - bytesToScan -= bytesRead; - } - - // Hit eof - return false; -} - -std::shared_ptr +template +std::shared_ptr Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, - std::shared_ptr const& oldBucket, - std::shared_ptr const& newBucket, - std::vector> const& shadows, - bool keepDeadEntries, bool countMergeEvents, + std::shared_ptr const& oldBucket, + std::shared_ptr const& newBucket, + std::vector> const& shadows, + bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx, bool doFsync) { + static_assert(std::is_same_v || + std::is_same_v); + ZoneScoped; // This is the key operation in the scheme: merging two (read-only) // buckets together into a new 3rd bucket, while calculating its hash, @@ -794,24 +833,43 @@ Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, releaseAssert(newBucket); MergeCounters mc; - BucketInputIterator oi(oldBucket); - BucketInputIterator ni(newBucket); - std::vector shadowIterators(shadows.begin(), - shadows.end()); + BucketInputIterator oi(oldBucket); + BucketInputIterator ni(newBucket); + std::vector> shadowIterators(shadows.begin(), + shadows.end()); uint32_t protocolVersion; bool keepShadowedLifecycleEntries; - calculateMergeProtocolVersion(mc, maxProtocolVersion, oi, ni, - shadowIterators, protocolVersion, - keepShadowedLifecycleEntries); + calculateMergeProtocolVersion(mc, maxProtocolVersion, oi, ni, + shadowIterators, protocolVersion, + keepShadowedLifecycleEntries); auto timer = bucketManager.getMergeTimer().TimeScope(); BucketMetadata meta; meta.ledgerVersion = protocolVersion; - BucketOutputIterator out(bucketManager.getTmpDir(), keepDeadEntries, meta, - mc, ctx, doFsync); - BucketEntryIdCmp cmp; + // If any inputs use the new extension of BucketMeta, the output should as + // well + if (ni.getMetadata().ext.v() == 1) + { + releaseAssertOrThrow(protocolVersionStartsFrom( + maxProtocolVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + meta.ext = ni.getMetadata().ext; + } + else if (oi.getMetadata().ext.v() == 1) + { + releaseAssertOrThrow(protocolVersionStartsFrom( + maxProtocolVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + meta.ext = oi.getMetadata().ext; + } + + BucketOutputIterator out(bucketManager.getTmpDir(), + keepTombstoneEntries, meta, mc, ctx, + doFsync); + + BucketEntryIdCmp cmp; size_t iter = 0; while (oi || ni) @@ -843,34 +901,73 @@ Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, { bucketManager.incrMergeCounters(mc); } - MergeKey mk{keepDeadEntries, oldBucket, newBucket, shadows}; - return out.getBucket(bucketManager, - bucketManager.getConfig().isUsingBucketListDB(), &mk); + + std::vector shadowHashes; + shadowHashes.reserve(shadows.size()); + for (auto const& s : shadows) + { + shadowHashes.push_back(s->getHash()); + } + + MergeKey mk{keepTombstoneEntries, oldBucket->getHash(), + newBucket->getHash(), shadowHashes}; + return out.getBucket(bucketManager, &mk); +} + +LiveBucket::LiveBucket(std::string const& filename, Hash const& hash, + std::unique_ptr&& index) + : Bucket(filename, hash, std::move(index)) +{ +} + +LiveBucket::LiveBucket() : Bucket() +{ } uint32_t -Bucket::getBucketVersion(std::shared_ptr const& bucket) +LiveBucket::getBucketVersion() const { - releaseAssert(bucket); - BucketInputIterator it(bucket); + LiveBucketInputIterator it(shared_from_this()); return it.getMetadata().ledgerVersion; } uint32_t 
-Bucket::getBucketVersion(std::shared_ptr<Bucket const> const& bucket)
+HotArchiveBucket::getBucketVersion() const
 {
-    releaseAssert(bucket);
-    BucketInputIterator it(bucket);
+    HotArchiveBucketInputIterator it(shared_from_this());
     return it.getMetadata().ledgerVersion;
 }
 
 BucketEntryCounters const&
-Bucket::getBucketEntryCounters() const
+LiveBucket::getBucketEntryCounters() const
 {
     releaseAssert(mIndex);
     return mIndex->getBucketEntryCounters();
 }
 
+HotArchiveBucket::HotArchiveBucket(std::string const& filename,
+                                   Hash const& hash,
+                                   std::unique_ptr<BucketIndex const>&& index)
+    : Bucket(filename, hash, std::move(index))
+{
+}
+
+HotArchiveBucket::HotArchiveBucket() : Bucket()
+{
+}
+
+bool
+LiveBucket::isTombstoneEntry(BucketEntry const& e)
+{
+    return e.type() == DEADENTRY;
+}
+
+bool
+HotArchiveBucket::isTombstoneEntry(HotArchiveBucketEntry const& e)
+{
+    return e.type() == HOT_ARCHIVE_LIVE;
+}
+
 BucketEntryCounters&
 BucketEntryCounters::operator+=(BucketEntryCounters const& other)
 {
@@ -897,4 +994,20 @@ BucketEntryCounters::operator!=(BucketEntryCounters const& other) const
 {
     return !(*this == other);
 }
+
+template std::shared_ptr<LiveBucket> Bucket::merge(
+    BucketManager& bucketManager, uint32_t maxProtocolVersion,
+    std::shared_ptr<LiveBucket> const& oldBucket,
+    std::shared_ptr<LiveBucket> const& newBucket,
+    std::vector<std::shared_ptr<LiveBucket>> const& shadows,
+    bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx,
+    bool doFsync);
+
+template std::shared_ptr<HotArchiveBucket> Bucket::merge(
+    BucketManager& bucketManager, uint32_t maxProtocolVersion,
+    std::shared_ptr<HotArchiveBucket> const& oldBucket,
+    std::shared_ptr<HotArchiveBucket> const& newBucket,
+    std::vector<std::shared_ptr<HotArchiveBucket>> const& shadows,
+    bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx,
+    bool doFsync);
 }
\ No newline at end of file
diff --git a/src/bucket/Bucket.h b/src/bucket/Bucket.h
index c4b6773949..f6965dce94 100644
--- a/src/bucket/Bucket.h
+++ b/src/bucket/Bucket.h
@@ -5,6 +5,7 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "bucket/BucketIndex.h"
+#include "bucket/BucketSnapshot.h"
 #include "util/NonCopyable.h"
 #include "util/ProtocolVersion.h"
 #include "xdr/Stellar-ledger.h"
@@ -36,19 +37,28 @@ namespace stellar
  * Two buckets can be merged together efficiently (in a single pass): elements
  * from the newer bucket overwrite elements from the older bucket, the rest are
  * merged in sorted order, and all elements are hashed while being added.
+ *
+ * Different types of BucketList vary in the type of entries they contain and,
+ * by extension, the merge logic of those entries. Additionally, some types of
+ * BucketList may have special operations only relevant to that specific type.
+ * This pure virtual base class provides the core functionality of a BucketList
+ * container and must be extended for each specific BucketList type. In
+ * particular, the fresh and merge functions must be defined for the specific
+ * type, while other functionality can be shared.
*/ class AbstractLedgerTxn; class Application; class BucketManager; -class SearchableBucketListSnapshot; struct EvictionResultEntry; class EvictionStatistics; struct BucketEntryCounters; +template class SearchableBucketListSnapshot; +enum class LedgerEntryTypeAndDurability : uint32_t; -class Bucket : public std::enable_shared_from_this, - public NonMovableOrCopyable +class Bucket : public NonMovableOrCopyable { + protected: std::filesystem::path const mFilename; Hash const mHash; size_t mSize{0}; @@ -62,6 +72,9 @@ class Bucket : public std::enable_shared_from_this, std::string ext); public: + static constexpr ProtocolVersion + FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION = ProtocolVersion::V_23; + // Create an empty bucket. The empty bucket has hash '000000...' and its // filename is the empty string. Bucket(); @@ -76,10 +89,6 @@ class Bucket : public std::enable_shared_from_this, std::filesystem::path const& getFilename() const; size_t getSize() const; - // Returns true if a BucketEntry that is key-wise identical to the given - // BucketEntry exists in the bucket. For testing. - bool containsBucketIdentity(BucketEntry const& id) const; - bool isEmpty() const; // Delete index and close file stream @@ -96,6 +105,62 @@ class Bucket : public std::enable_shared_from_this, // Sets index, throws if index is already set void setIndex(std::unique_ptr&& index); + // Merge two buckets together, producing a fresh one. Entries in `oldBucket` + // are overridden in the fresh bucket by keywise-equal entries in + // `newBucket`. Entries are inhibited from the fresh bucket by keywise-equal + // entries in any of the buckets in the provided `shadows` vector. + // + // Each bucket is self-describing in terms of the ledger protocol version it + // was constructed under, and the merge algorithm adjusts to the maximum of + // the versions attached to each input or shadow bucket. The provided + // `maxProtocolVersion` bounds this (for error checking) and should usually + // be the protocol of the ledger header at which the merge is starting. An + // exception will be thrown if any provided bucket versions exceed it. + template + static std::shared_ptr + merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, + std::shared_ptr const& oldBucket, + std::shared_ptr const& newBucket, + std::vector> const& shadows, + bool keepTombstoneEntries, bool countMergeEvents, + asio::io_context& ctx, bool doFsync); + + static std::string randomBucketName(std::string const& tmpDir); + static std::string randomBucketIndexName(std::string const& tmpDir); + +#ifdef BUILD_TESTS + BucketIndex const& + getIndexForTesting() const + { + return getIndex(); + } + +#endif // BUILD_TESTS + + virtual uint32_t getBucketVersion() const = 0; + + template friend class BucketSnapshotBase; +}; + +/* + * Live Buckets are used by the LiveBucketList to store the current canonical + * state of the ledger. They contain entries of type BucketEntry. + */ +class LiveBucket : public Bucket, + public std::enable_shared_from_this +{ + public: + LiveBucket(); + virtual ~LiveBucket() + { + } + LiveBucket(std::string const& filename, Hash const& hash, + std::unique_ptr&& index); + + // Returns true if a BucketEntry that is key-wise identical to the given + // BucketEntry exists in the bucket. For testing. + bool containsBucketIdentity(BucketEntry const& id) const; + // At version 11, we added support for INITENTRY and METAENTRY. Before this // we were only supporting LIVEENTRY and DEADENTRY. 
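The constant declared next is what `checkProtocolLegality` consumes; in outline, the gate looks like the following sketch (not the verbatim implementation):

```c++
// Sketch of the protocol gate enforced by LiveBucket::checkProtocolLegality:
// INITENTRY/METAENTRY may not appear in buckets written before the protocol
// version that introduced them.
if (stellar::protocolVersionIsBefore(
        protocolVersion,
        stellar::LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) &&
    (entry.type() == stellar::INITENTRY || entry.type() == stellar::METAENTRY))
{
    throw std::runtime_error(
        "INITENTRY/METAENTRY not supported before protocol 11");
}
```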
static constexpr ProtocolVersion @@ -113,79 +178,80 @@ class Bucket : public std::enable_shared_from_this, std::vector const& liveEntries, std::vector const& deadEntries); - static std::string randomBucketName(std::string const& tmpDir); - static std::string randomBucketIndexName(std::string const& tmpDir); - #ifdef BUILD_TESTS // "Applies" the bucket to the database. For each entry in the bucket, // if the entry is init or live, creates or updates the corresponding // entry in the database (respectively; if the entry is dead (a // tombstone), deletes the corresponding entry in the database. void apply(Application& app) const; - - BucketIndex const& - getIndexForTesting() const - { - return getIndex(); - } - -#endif // BUILD_TESTS +#endif // Returns false if eof reached, true otherwise. Modifies iter as the bucket // is scanned. Also modifies bytesToScan and maxEntriesToEvict such that // after this function returns: // bytesToScan -= amount_bytes_scanned // maxEntriesToEvict -= entries_evicted - bool scanForEvictionLegacy(AbstractLedgerTxn& ltx, EvictionIterator& iter, - uint32_t& bytesToScan, - uint32_t& remainingEntriesToEvict, - uint32_t ledgerSeq, - medida::Counter& entriesEvictedCounter, - medida::Counter& bytesScannedForEvictionCounter, - std::shared_ptr stats) const; - bool scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, uint32_t ledgerSeq, std::list& evictableKeys, - SearchableBucketListSnapshot& bl) const; + SearchableBucketListSnapshot& bl) const; // Create a fresh bucket from given vectors of init (created) and live // (updated) LedgerEntries, and dead LedgerEntryKeys. The bucket will // be sorted, hashed, and adopted in the provided BucketManager. - static std::shared_ptr + static std::shared_ptr fresh(BucketManager& bucketManager, uint32_t protocolVersion, std::vector const& initEntries, std::vector const& liveEntries, std::vector const& deadEntries, bool countMergeEvents, asio::io_context& ctx, bool doFsync); - // Merge two buckets together, producing a fresh one. Entries in `oldBucket` - // are overridden in the fresh bucket by keywise-equal entries in - // `newBucket`. Entries are inhibited from the fresh bucket by keywise-equal - // entries in any of the buckets in the provided `shadows` vector. - // - // Each bucket is self-describing in terms of the ledger protocol version it - // was constructed under, and the merge algorithm adjusts to the maximum of - // the versions attached to each input or shadow bucket. The provided - // `maxProtocolVersion` bounds this (for error checking) and should usually - // be the protocol of the ledger header at which the merge is starting. An - // exception will be thrown if any provided bucket versions exceed it. - static std::shared_ptr - merge(BucketManager& bucketManager, uint32_t maxProtocolVersion, - std::shared_ptr const& oldBucket, - std::shared_ptr const& newBucket, - std::vector> const& shadows, - bool keepDeadEntries, bool countMergeEvents, asio::io_context& ctx, - bool doFsync); - - static uint32_t getBucketVersion(std::shared_ptr const& bucket); - static uint32_t - getBucketVersion(std::shared_ptr const& bucket); + // Returns true if the given BucketEntry should be dropped in the bottom + // level bucket (i.e. 
DEADENTRY) + static bool isTombstoneEntry(BucketEntry const& e); + + uint32_t getBucketVersion() const override; + BucketEntryCounters const& getBucketEntryCounters() const; - friend class BucketSnapshot; + + friend class LiveBucketSnapshot; +}; + +/* + * Hot Archive Buckets are used by the HotBucketList to store recently evicted + * entries. They contain entries of type HotArchiveBucketEntry. + */ +class HotArchiveBucket : public Bucket, + public std::enable_shared_from_this +{ + static std::vector + convertToBucketEntry(std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries); + + public: + HotArchiveBucket(); + virtual ~HotArchiveBucket() + { + } + HotArchiveBucket(std::string const& filename, Hash const& hash, + std::unique_ptr&& index); + uint32_t getBucketVersion() const override; + + static std::shared_ptr + fresh(BucketManager& bucketManager, uint32_t protocolVersion, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries, bool countMergeEvents, + asio::io_context& ctx, bool doFsync); + + // Returns true if the given BucketEntry should be dropped in the bottom + // level bucket (i.e. HOT_ARCHIVE_LIVE) + static bool isTombstoneEntry(HotArchiveBucketEntry const& e); + + friend class HotArchiveBucketSnapshot; }; -enum class LedgerEntryTypeAndDurability : uint32_t; struct BucketEntryCounters { std::map entryTypeCounts; diff --git a/src/bucket/BucketApplicator.cpp b/src/bucket/BucketApplicator.cpp index 7c739aa6f2..a9400c9d18 100644 --- a/src/bucket/BucketApplicator.cpp +++ b/src/bucket/BucketApplicator.cpp @@ -9,6 +9,7 @@ #include "ledger/LedgerTxn.h" #include "ledger/LedgerTxnEntry.h" #include "main/Application.h" +#include "util/GlobalChecks.h" #include "util/Logging.h" #include "util/types.h" #include @@ -20,15 +21,13 @@ BucketApplicator::BucketApplicator(Application& app, uint32_t maxProtocolVersion, uint32_t minProtocolVersionSeen, uint32_t level, - std::shared_ptr bucket, - std::function filter, + std::shared_ptr bucket, std::unordered_set& seenKeys) : mApp(app) , mMaxProtocolVersion(maxProtocolVersion) , mMinProtocolVersionSeen(minProtocolVersionSeen) , mLevel(level) , mBucketIter(bucket) - , mEntryTypeFilter(filter) , mSeenKeys(seenKeys) { auto protocolVersion = mBucketIter.getMetadata().ledgerVersion; @@ -40,8 +39,8 @@ BucketApplicator::BucketApplicator(Application& app, protocolVersion, mMaxProtocolVersion)); } - // Only apply offers if BucketListDB is enabled - if (mApp.getConfig().isUsingBucketListDB() && !bucket->isEmpty()) + // Only apply offers + if (!bucket->isEmpty()) { auto offsetOp = bucket->getOfferRange(); if (offsetOp) @@ -62,10 +61,8 @@ BucketApplicator::operator bool() const { // There is more work to do (i.e. (bool) *this == true) iff: // 1. The underlying bucket iterator is not EOF and - // 2. Either BucketListDB is not enabled (so we must apply all entry types) - // or BucketListDB is enabled and we have offers still remaining. - return static_cast(mBucketIter) && - (!mApp.getConfig().isUsingBucketListDB() || mOffersRemaining); + // 2. We have offers still remaining. 
+ return static_cast(mBucketIter) && mOffersRemaining; } size_t @@ -81,12 +78,11 @@ BucketApplicator::size() const } static bool -shouldApplyEntry(std::function const& filter, - BucketEntry const& e) +shouldApplyEntry(BucketEntry const& e) { if (e.type() == LIVEENTRY || e.type() == INITENTRY) { - return filter(e.liveEntry().data.type()); + return BucketIndex::typeNotSupported(e.liveEntry().data.type()); } if (e.type() != DEADENTRY) @@ -94,7 +90,7 @@ shouldApplyEntry(std::function const& filter, throw std::runtime_error( "Malformed bucket: unexpected non-INIT/LIVE/DEAD entry."); } - return filter(e.deadEntry().type()); + return BucketIndex::typeNotSupported(e.deadEntry().type()); } size_t @@ -110,11 +106,13 @@ BucketApplicator::advance(BucketApplicator::Counters& counters) // directly instead of creating a temporary inner LedgerTxn // as "advance" commits changes during each step this does not introduce any // new failure mode +#ifdef BUILD_TESTS if (mApp.getConfig().MODE_USES_IN_MEMORY_LEDGER) { ltx = static_cast(&root); } else +#endif { innerLtx = std::make_unique(root, false); ltx = innerLtx.get(); @@ -127,99 +125,73 @@ BucketApplicator::advance(BucketApplicator::Counters& counters) // returns the file offset at the end of the currently loaded entry. // This means we must read until pos is strictly greater than the upper // bound so that we don't skip the last offer in the range. - auto isUsingBucketListDB = mApp.getConfig().isUsingBucketListDB(); - if (isUsingBucketListDB && mBucketIter.pos() > mUpperBoundOffset) + if (mBucketIter.pos() > mUpperBoundOffset) { mOffersRemaining = false; break; } BucketEntry const& e = *mBucketIter; - Bucket::checkProtocolLegality(e, mMaxProtocolVersion); + LiveBucket::checkProtocolLegality(e, mMaxProtocolVersion); - if (shouldApplyEntry(mEntryTypeFilter, e)) + if (shouldApplyEntry(e)) { - if (isUsingBucketListDB) + if (e.type() == LIVEENTRY || e.type() == INITENTRY) { - if (e.type() == LIVEENTRY || e.type() == INITENTRY) - { - auto [_, wasInserted] = - mSeenKeys.emplace(LedgerEntryKey(e.liveEntry())); + auto [_, wasInserted] = + mSeenKeys.emplace(LedgerEntryKey(e.liveEntry())); - // Skip seen keys - if (!wasInserted) - { - continue; - } - } - else + // Skip seen keys + if (!wasInserted) { - // Only apply INIT and LIVE entries - mSeenKeys.emplace(e.deadEntry()); continue; } } + else + { + // Only apply INIT and LIVE entries + mSeenKeys.emplace(e.deadEntry()); + continue; + } counters.mark(e); - if (e.type() == LIVEENTRY || e.type() == INITENTRY) + // DEAD and META entries skipped + releaseAssert(e.type() == LIVEENTRY || e.type() == INITENTRY); + // The last level can have live entries, but at that point we + // know that they are actually init entries because the earliest + // state of all entries is init, so we mark them as such here + if (mLevel == LiveBucketList::kNumLevels - 1 && + e.type() == LIVEENTRY) { - // The last level can have live entries, but at that point we - // know that they are actually init entries because the earliest - // state of all entries is init, so we mark them as such here - if (mLevel == BucketList::kNumLevels - 1 && - e.type() == LIVEENTRY) - { - ltx->createWithoutLoading(e.liveEntry()); - } - else if ( - protocolVersionIsBefore( - mMinProtocolVersionSeen, - Bucket:: - FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + ltx->createWithoutLoading(e.liveEntry()); + } + else if (protocolVersionIsBefore( + mMinProtocolVersionSeen, + LiveBucket:: + FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + { + // Prior to protocol 
11, INITENTRY didn't exist, so we need + // to check ltx to see if this is an update or a create + auto key = InternalLedgerEntry(e.liveEntry()).toKey(); + if (ltx->getNewestVersion(key)) { - // Prior to protocol 11, INITENTRY didn't exist, so we need - // to check ltx to see if this is an update or a create - auto key = InternalLedgerEntry(e.liveEntry()).toKey(); - if (ltx->getNewestVersion(key)) - { - ltx->updateWithoutLoading(e.liveEntry()); - } - else - { - ltx->createWithoutLoading(e.liveEntry()); - } + ltx->updateWithoutLoading(e.liveEntry()); } else { - if (e.type() == LIVEENTRY) - { - ltx->updateWithoutLoading(e.liveEntry()); - } - else - { - ltx->createWithoutLoading(e.liveEntry()); - } + ltx->createWithoutLoading(e.liveEntry()); } } else { - releaseAssertOrThrow(!isUsingBucketListDB); - if (protocolVersionIsBefore( - mMinProtocolVersionSeen, - Bucket:: - FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + if (e.type() == LIVEENTRY) { - // Prior to protocol 11, DEAD entries could exist - // without LIVE entries in between - if (ltx->getNewestVersion(e.deadEntry())) - { - ltx->eraseWithoutLoading(e.deadEntry()); - } + ltx->updateWithoutLoading(e.liveEntry()); } else { - ltx->eraseWithoutLoading(e.deadEntry()); + ltx->createWithoutLoading(e.liveEntry()); } } diff --git a/src/bucket/BucketApplicator.h b/src/bucket/BucketApplicator.h index 88bc58ff6a..f96b6f4e44 100644 --- a/src/bucket/BucketApplicator.h +++ b/src/bucket/BucketApplicator.h @@ -24,9 +24,8 @@ class BucketApplicator uint32_t mMaxProtocolVersion; uint32_t mMinProtocolVersionSeen; uint32_t mLevel; - BucketInputIterator mBucketIter; + LiveBucketInputIterator mBucketIter; size_t mCount{0}; - std::function mEntryTypeFilter; std::unordered_set& mSeenKeys; std::streamoff mUpperBoundOffset{0}; bool mOffersRemaining{true}; @@ -72,8 +71,7 @@ class BucketApplicator // When this flag is set, each offer key read is added to seenKeys BucketApplicator(Application& app, uint32_t maxProtocolVersion, uint32_t minProtocolVersionSeen, uint32_t level, - std::shared_ptr bucket, - std::function filter, + std::shared_ptr bucket, std::unordered_set& seenKeys); operator bool() const; size_t advance(Counters& counters); diff --git a/src/bucket/BucketIndex.h b/src/bucket/BucketIndex.h index 7dd34fc999..25604e28c0 100644 --- a/src/bucket/BucketIndex.h +++ b/src/bucket/BucketIndex.h @@ -84,6 +84,7 @@ class BucketIndex : public NonMovableOrCopyable // the largest buckets) and should only be called once. If pageSize == 0 or // if file size is less than the cutoff, individual key index is used. // Otherwise range index is used, with the range defined by pageSize. 
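With `createIndex` now templated on the entry type (declaration follows), the call shape is roughly as below. This is a hypothetical call; `bm`, `path`, and `hash` are assumed to be in scope, and the explicit instantiations at the bottom of BucketIndexImpl.cpp cover `BucketEntry` and `HotArchiveBucketEntry`:

```c++
// Build a live-bucket index from a bucket file on disk (illustrative).
std::unique_ptr<BucketIndex const> idx =
    BucketIndex::createIndex<BucketEntry>(bm, path, hash);
```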
+ template static std::unique_ptr createIndex(BucketManager& bm, std::filesystem::path const& filename, Hash const& hash); diff --git a/src/bucket/BucketIndexImpl.cpp b/src/bucket/BucketIndexImpl.cpp index f4108d0fb3..39f30055ea 100644 --- a/src/bucket/BucketIndexImpl.cpp +++ b/src/bucket/BucketIndexImpl.cpp @@ -25,6 +25,7 @@ #include #include +#include #include namespace stellar @@ -66,13 +67,18 @@ BucketIndex::typeNotSupported(LedgerEntryType t) } template +template BucketIndexImpl::BucketIndexImpl(BucketManager& bm, std::filesystem::path const& filename, std::streamoff pageSize, - Hash const& hash) + Hash const& hash, + BucketEntryT const& typeTag) : mBloomMissMeter(bm.getBloomMissMeter()) , mBloomLookupMeter(bm.getBloomLookupMeter()) { + static_assert(std::is_same_v || + std::is_same_v); + ZoneScoped; releaseAssert(!filename.empty()); @@ -94,7 +100,7 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, in.open(filename.string()); std::streamoff pos = 0; std::streamoff pageUpperBound = 0; - BucketEntry be; + BucketEntryT be; size_t iter = 0; size_t count = 0; @@ -126,35 +132,51 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, } } - if (be.type() != METAENTRY) + auto isMeta = [](auto const& be) { + if constexpr (std::is_same::value) + { + return be.type() == METAENTRY; + } + else + { + return be.type() == HOT_ARCHIVE_METAENTRY; + } + }; + + if (!isMeta(be)) { ++count; LedgerKey key = getBucketLedgerKey(be); - // We need an asset to poolID mapping for - // loadPoolshareTrustlineByAccountAndAsset queries. For this - // query, we only need to index INIT entries because: - // 1. PoolID is the hash of the Assets it refers to, so this - // index cannot be invalidated by newer LIVEENTRY updates - // 2. We do a join over all bucket indexes so we avoid storing - // multiple redundant index entries (i.e. LIVEENTRY updates) - // 3. We only use this index to collect the possible set of - // Trustline keys, then we load those keys. This means that - // we don't need to keep track of DEADENTRY. Even if a given - // INITENTRY has been deleted by a newer DEADENTRY, the - // trustline load will not return deleted trustlines, so the - // load result is still correct even if the index has a few - // deleted mappings. - if (be.type() == INITENTRY && key.type() == LIQUIDITY_POOL) + if constexpr (std::is_same_v) { - auto const& poolParams = be.liveEntry() - .data.liquidityPool() - .body.constantProduct() - .params; - mData.assetToPoolID[poolParams.assetA].emplace_back( - key.liquidityPool().liquidityPoolID); - mData.assetToPoolID[poolParams.assetB].emplace_back( - key.liquidityPool().liquidityPoolID); + // We need an asset to poolID mapping for + // loadPoolshareTrustlineByAccountAndAsset queries. For this + // query, we only need to index INIT entries because: + // 1. PoolID is the hash of the Assets it refers to, so this + // index cannot be invalidated by newer LIVEENTRY updates + // 2. We do a join over all bucket indexes so we avoid + // storing + // multiple redundant index entries (i.e. LIVEENTRY + // updates) + // 3. We only use this index to collect the possible set of + // Trustline keys, then we load those keys. This means + // that we don't need to keep track of DEADENTRY. Even if + // a given INITENTRY has been deleted by a newer + // DEADENTRY, the trustline load will not return deleted + // trustlines, so the load result is still correct even + // if the index has a few deleted mappings. 
+ if (be.type() == INITENTRY && key.type() == LIQUIDITY_POOL) + { + auto const& poolParams = be.liveEntry() + .data.liquidityPool() + .body.constantProduct() + .params; + mData.assetToPoolID[poolParams.assetA].emplace_back( + key.liquidityPool().liquidityPoolID); + mData.assetToPoolID[poolParams.assetB].emplace_back( + key.liquidityPool().liquidityPoolID); + } } if constexpr (std::is_same::value) @@ -182,7 +204,11 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, { mData.keysToOffset.emplace_back(key, pos); } - countEntry(be); + + if constexpr (std::is_same::value) + { + countEntry(be); + } } pos = in.pos(); @@ -203,7 +229,7 @@ BucketIndexImpl::BucketIndexImpl(BucketManager& bm, ZoneValue(static_cast(count)); } - if (bm.getConfig().isPersistingBucketListDBIndexes()) + if (bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX) { saveToDisk(bm, hash); } @@ -224,7 +250,7 @@ BucketIndexImpl::saveToDisk(BucketManager& bm, Hash const& hash) const { ZoneScoped; - releaseAssert(bm.getConfig().isPersistingBucketListDBIndexes()); + releaseAssert(bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX); auto timer = LogSlowExecution("Saving index", LogSlowExecution::Mode::AUTOMATIC_RAII, "took", std::chrono::milliseconds(100)); @@ -328,14 +354,17 @@ upper_bound_pred(LedgerKey const& key, IndexEntryT const& indexEntry) } } +template std::unique_ptr BucketIndex::createIndex(BucketManager& bm, std::filesystem::path const& filename, Hash const& hash) { + static_assert(std::is_same_v || + std::is_same_v); + ZoneScoped; auto const& cfg = bm.getConfig(); - releaseAssertOrThrow(cfg.isUsingBucketListDB()); releaseAssertOrThrow(!filename.empty()); auto pageSize = effectivePageSize(cfg, fs::size(filename.string())); @@ -348,7 +377,8 @@ BucketIndex::createIndex(BucketManager& bm, "bucket {}", filename); return std::unique_ptr const>( - new BucketIndexImpl(bm, filename, 0, hash)); + new BucketIndexImpl(bm, filename, 0, hash, + BucketEntryT{})); } else { @@ -358,7 +388,8 @@ BucketIndex::createIndex(BucketManager& bm, "{} in bucket {}", pageSize, filename); return std::unique_ptr const>( - new BucketIndexImpl(bm, filename, pageSize, hash)); + new BucketIndexImpl(bm, filename, pageSize, hash, + BucketEntryT{})); } } // BucketIndexImpl throws if BucketManager shuts down before index finishes, @@ -541,9 +572,18 @@ BucketIndexImpl::operator==(BucketIndex const& inRaw) const if constexpr (std::is_same::value) { - releaseAssert(mData.filter); - releaseAssert(in.mData.filter); - if (!(*(mData.filter) == *(in.mData.filter))) + if (!mData.filter) + { + if (in.mData.filter) + { + return false; + } + } + else if (!in.mData.filter) + { + return false; + } + else if (!(*(mData.filter) == *(in.mData.filter))) { return false; } @@ -605,4 +645,12 @@ BucketIndexImpl::getBucketEntryCounters() const { return mData.counters; } + +template std::unique_ptr +BucketIndex::createIndex(BucketManager& bm, + std::filesystem::path const& filename, + Hash const& hash); +template std::unique_ptr +BucketIndex::createIndex( + BucketManager& bm, std::filesystem::path const& filename, Hash const& hash); } diff --git a/src/bucket/BucketIndexImpl.h b/src/bucket/BucketIndexImpl.h index d34155b055..f23ea9bdca 100644 --- a/src/bucket/BucketIndexImpl.h +++ b/src/bucket/BucketIndexImpl.h @@ -59,8 +59,13 @@ template class BucketIndexImpl : public BucketIndex medida::Meter& mBloomMissMeter; medida::Meter& mBloomLookupMeter; + // Templated constructors are valid C++, but since this is a templated class + // already, there's no way for the compiler to deduce the type without a + 
// templated parameter, hence the tag + template BucketIndexImpl(BucketManager& bm, std::filesystem::path const& filename, - std::streamoff pageSize, Hash const& hash); + std::streamoff pageSize, Hash const& hash, + BucketEntryT const& typeTag); template BucketIndexImpl(BucketManager const& bm, Archive& ar, diff --git a/src/bucket/BucketInputIterator.cpp b/src/bucket/BucketInputIterator.cpp index 7a3673b7f4..da3b4a97eb 100644 --- a/src/bucket/BucketInputIterator.cpp +++ b/src/bucket/BucketInputIterator.cpp @@ -4,7 +4,9 @@ #include "bucket/BucketInputIterator.h" #include "bucket/Bucket.h" +#include "xdr/Stellar-ledger.h" #include +#include namespace stellar { @@ -12,14 +14,25 @@ namespace stellar * Helper class that reads from the file underlying a bucket, keeping the bucket * alive for the duration of its existence. */ +template void -BucketInputIterator::loadEntry() +BucketInputIterator::loadEntry() { ZoneScoped; if (mIn.readOne(mEntry)) { mEntryPtr = &mEntry; - if (mEntry.type() == METAENTRY) + bool isMeta; + if constexpr (std::is_same_v) + { + isMeta = mEntry.type() == METAENTRY; + } + else + { + isMeta = mEntry.type() == HOT_ARCHIVE_METAENTRY; + } + + if (isMeta) { // There should only be one METAENTRY in the input stream // and it should be the first record. @@ -34,6 +47,18 @@ BucketInputIterator::loadEntry() "Malformed bucket: META after other entries."); } mMetadata = mEntry.metaEntry(); + + if constexpr (std::is_same::value) + { + if (mMetadata.ext.v() != 1 || + mMetadata.ext.bucketListType() != HOT_ARCHIVE) + { + throw std::runtime_error( + "Malformed bucket: META entry with incorrect bucket " + "list type."); + } + } + mSeenMetadata = true; loadEntry(); } @@ -42,7 +67,11 @@ BucketInputIterator::loadEntry() mSeenOtherEntries = true; if (mSeenMetadata) { - Bucket::checkProtocolLegality(mEntry, mMetadata.ledgerVersion); + if constexpr (std::is_same_v) + { + LiveBucket::checkProtocolLegality(mEntry, + mMetadata.ledgerVersion); + } } } } @@ -52,42 +81,48 @@ BucketInputIterator::loadEntry() } } +template std::streamoff -BucketInputIterator::pos() +BucketInputIterator::pos() { return mIn.pos(); } +template size_t -BucketInputIterator::size() const +BucketInputIterator::size() const { return mIn.size(); } -BucketInputIterator::operator bool() const +template BucketInputIterator::operator bool() const { return mEntryPtr != nullptr; } -BucketEntry const& -BucketInputIterator::operator*() +template +typename BucketInputIterator::BucketEntryT const& +BucketInputIterator::operator*() { return *mEntryPtr; } +template bool -BucketInputIterator::seenMetadata() const +BucketInputIterator::seenMetadata() const { return mSeenMetadata; } +template BucketMetadata const& -BucketInputIterator::getMetadata() const +BucketInputIterator::getMetadata() const { return mMetadata; } -BucketInputIterator::BucketInputIterator(std::shared_ptr bucket) +template +BucketInputIterator::BucketInputIterator(std::shared_ptr bucket) : mBucket(bucket), mEntryPtr(nullptr), mSeenMetadata(false) { // In absence of metadata, we treat every bucket as though it is from ledger @@ -106,13 +141,14 @@ BucketInputIterator::BucketInputIterator(std::shared_ptr bucket) } } -BucketInputIterator::~BucketInputIterator() +template BucketInputIterator::~BucketInputIterator() { mIn.close(); } -BucketInputIterator& -BucketInputIterator::operator++() +template +BucketInputIterator& +BucketInputIterator::operator++() { if (mIn) { @@ -125,10 +161,14 @@ BucketInputIterator::operator++() return *this; } +template void 
-BucketInputIterator::seek(std::streamoff offset) +BucketInputIterator::seek(std::streamoff offset) { mIn.seek(offset); loadEntry(); } + +template class BucketInputIterator; +template class BucketInputIterator; } diff --git a/src/bucket/BucketInputIterator.h b/src/bucket/BucketInputIterator.h index 02bdb2f3ea..ffccf33cd0 100644 --- a/src/bucket/BucketInputIterator.h +++ b/src/bucket/BucketInputIterator.h @@ -8,23 +8,32 @@ #include "xdr/Stellar-ledger.h" #include +#include namespace stellar { class Bucket; +class LiveBucket; +class HotArchiveBucket; // Helper class that reads through the entries in a bucket. -class BucketInputIterator +template class BucketInputIterator { - std::shared_ptr mBucket; + static_assert(std::is_same_v || + std::is_same_v); + + using BucketEntryT = std::conditional_t, + BucketEntry, HotArchiveBucketEntry>; + + std::shared_ptr mBucket; // Validity and current-value of the iterator is funneled into a // pointer. If // non-null, it points to mEntry. - BucketEntry const* mEntryPtr{nullptr}; + BucketEntryT const* mEntryPtr{nullptr}; XDRInputFileStream mIn; - BucketEntry mEntry; + BucketEntryT mEntry; bool mSeenMetadata{false}; bool mSeenOtherEntries{false}; BucketMetadata mMetadata; @@ -43,9 +52,9 @@ class BucketInputIterator bool seenMetadata() const; BucketMetadata const& getMetadata() const; - BucketEntry const& operator*(); + BucketEntryT const& operator*(); - BucketInputIterator(std::shared_ptr bucket); + BucketInputIterator(std::shared_ptr bucket); ~BucketInputIterator(); @@ -55,4 +64,7 @@ class BucketInputIterator size_t size() const; void seek(std::streamoff offset); }; + +typedef BucketInputIterator LiveBucketInputIterator; +typedef BucketInputIterator HotArchiveBucketInputIterator; } diff --git a/src/bucket/BucketList.cpp b/src/bucket/BucketList.cpp index e714280a7e..9b77655603 100644 --- a/src/bucket/BucketList.cpp +++ b/src/bucket/BucketList.cpp @@ -25,15 +25,23 @@ namespace stellar { -BucketLevel::BucketLevel(uint32_t i) +template <> BucketListDepth BucketListBase::kNumLevels = 11; + +// TODO: This is an arbitrary number. Do some analysis and pick a better value +// or make this a configurable network config. 
+template <> BucketListDepth BucketListBase::kNumLevels = 9; + +template +BucketLevel::BucketLevel(uint32_t i) : mLevel(i) - , mCurr(std::make_shared()) - , mSnap(std::make_shared()) + , mCurr(std::make_shared()) + , mSnap(std::make_shared()) { } +template uint256 -BucketLevel::getHash() const +BucketLevel::getHash() const { SHA256 hsh; hsh.add(mCurr->getHash()); @@ -41,47 +49,59 @@ BucketLevel::getHash() const return hsh.finish(); } -FutureBucket const& -BucketLevel::getNext() const +template +FutureBucket const& +BucketLevel::getNext() const { return mNextCurr; } -FutureBucket& -BucketLevel::getNext() +template +FutureBucket& +BucketLevel::getNext() { return mNextCurr; } +template void -BucketLevel::setNext(FutureBucket const& fb) +BucketLevel::setNext(FutureBucket const& fb) { releaseAssert(threadIsMain()); mNextCurr = fb; } -std::shared_ptr -BucketLevel::getCurr() const +template +std::shared_ptr +BucketLevel::getCurr() const { return mCurr; } -std::shared_ptr -BucketLevel::getSnap() const +template +std::shared_ptr +BucketLevel::getSnap() const { return mSnap; } +template void -BucketLevel::setCurr(std::shared_ptr b) +BucketLevel::setCurr(std::shared_ptr b) { releaseAssert(threadIsMain()); mNextCurr.clear(); mCurr = b; } +template BucketListBase::~BucketListBase() +{ +} + +template bool -BucketList::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level) +BucketListBase::shouldMergeWithEmptyCurr(uint32_t ledger, + uint32_t level) { if (level != 0) @@ -89,7 +109,7 @@ BucketList::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level) // Round down the current ledger to when the merge was started, and // re-start the merge via prepare, mimicking the logic in `addBatch` auto mergeStartLedger = - roundDown(ledger, BucketList::levelHalf(level - 1)); + roundDown(ledger, BucketListBase::levelHalf(level - 1)); // Subtle: We're "preparing the next state" of this level's mCurr, which // is *either* mCurr merged with snap, or else just snap (if mCurr is @@ -107,15 +127,17 @@ BucketList::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level) return false; } +template void -BucketLevel::setSnap(std::shared_ptr b) +BucketLevel::setSnap(std::shared_ptr b) { releaseAssert(threadIsMain()); mSnap = b; } +template void -BucketLevel::commit() +BucketLevel::commit() { if (mNextCurr.isLive()) { @@ -158,35 +180,51 @@ BucketLevel::commit() // ---------------------------------------------------------------------------------------- // ... // clang-format on +template void -BucketLevel::prepare(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, std::shared_ptr snap, - std::vector> const& shadows, - bool countMergeEvents) +BucketLevel::prepare( + Application& app, uint32_t currLedger, uint32_t currLedgerProtocol, + std::shared_ptr snap, + std::vector> const& shadows, bool countMergeEvents) { ZoneScoped; // If more than one absorb is pending at the same time, we have a logic // error in our caller (and all hell will break loose). releaseAssert(!mNextCurr.isMerging()); - auto curr = BucketList::shouldMergeWithEmptyCurr(currLedger, mLevel) - ? std::make_shared() - : mCurr; - - auto shadowsBasedOnProtocol = - protocolVersionStartsFrom(Bucket::getBucketVersion(snap), - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED) - ? std::vector>() - : shadows; - mNextCurr = FutureBucket(app, curr, snap, shadowsBasedOnProtocol, - currLedgerProtocol, countMergeEvents, mLevel); + auto curr = + BucketListBase::shouldMergeWithEmptyCurr(currLedger, mLevel) + ? 
std::make_shared() + : mCurr; + + if constexpr (std::is_same_v) + { + auto shadowsBasedOnProtocol = + protocolVersionStartsFrom( + snap->getBucketVersion(), + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) + ? std::vector>() + : shadows; + mNextCurr = + FutureBucket(app, curr, snap, shadowsBasedOnProtocol, + currLedgerProtocol, countMergeEvents, mLevel); + } + else + { + // HotArchive only exists for protocol > 21, should never have shadows + mNextCurr = + FutureBucket(app, curr, snap, /*shadows=*/{}, + currLedgerProtocol, countMergeEvents, mLevel); + } + releaseAssert(mNextCurr.isMerging()); } -std::shared_ptr -BucketLevel::snap() +template +std::shared_ptr +BucketLevel::snap() { mSnap = mCurr; - mCurr = std::make_shared(); + mCurr = std::make_shared(); return mSnap; } @@ -221,8 +259,9 @@ BucketListDepth::operator uint32_t() const // levelSize(8) = 262144=0x040000 // levelSize(9) = 1048576=0x100000 // levelSize(10) = 4194304=0x400000 +template uint32_t -BucketList::levelSize(uint32_t level) +BucketListBase::levelSize(uint32_t level) { releaseAssert(level < kNumLevels); return 1UL << (2 * (level + 1)); @@ -243,14 +282,16 @@ BucketList::levelSize(uint32_t level) // levelHalf(8) = 131072=0x020000 // levelHalf(9) = 524288=0x080000 // levelHalf(10) = 2097152=0x200000 +template uint32_t -BucketList::levelHalf(uint32_t level) +BucketListBase::levelHalf(uint32_t level) { return levelSize(level) >> 1; } +template uint32_t -BucketList::sizeOfCurr(uint32_t ledger, uint32_t level) +BucketListBase::sizeOfCurr(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); @@ -261,7 +302,8 @@ BucketList::sizeOfCurr(uint32_t ledger, uint32_t level) auto const size = levelSize(level); auto const half = levelHalf(level); - if (level != BucketList::kNumLevels - 1 && roundDown(ledger, half) != 0) + if (level != BucketListBase::kNumLevels - 1 && + roundDown(ledger, half) != 0) { uint32_t const sizeDelta = 1UL << (2 * level - 1); if (roundDown(ledger, half) == ledger || @@ -297,12 +339,13 @@ BucketList::sizeOfCurr(uint32_t ledger, uint32_t level) } } +template uint32_t -BucketList::sizeOfSnap(uint32_t ledger, uint32_t level) +BucketListBase::sizeOfSnap(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); - if (level == BucketList::kNumLevels - 1) + if (level == BucketListBase::kNumLevels - 1) { return 0; } @@ -323,8 +366,9 @@ BucketList::sizeOfSnap(uint32_t ledger, uint32_t level) } } +template uint32_t -BucketList::oldestLedgerInCurr(uint32_t ledger, uint32_t level) +BucketListBase::oldestLedgerInCurr(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); @@ -343,8 +387,9 @@ BucketList::oldestLedgerInCurr(uint32_t ledger, uint32_t level) return count + 1; } +template uint32_t -BucketList::oldestLedgerInSnap(uint32_t ledger, uint32_t level) +BucketListBase::oldestLedgerInSnap(uint32_t ledger, uint32_t level) { releaseAssert(ledger != 0); releaseAssert(level < kNumLevels); @@ -362,8 +407,9 @@ BucketList::oldestLedgerInSnap(uint32_t ledger, uint32_t level) return count + 1; } +template uint256 -BucketList::getHash() const +BucketListBase::getHash() const { ZoneScoped; SHA256 hsh; @@ -393,8 +439,9 @@ BucketList::getHash() const // // clang-format on +template bool -BucketList::levelShouldSpill(uint32_t ledger, uint32_t level) +BucketListBase::levelShouldSpill(uint32_t ledger, uint32_t level) { if (level == kNumLevels - 1) { @@ -411,8 +458,9 @@ BucketList::levelShouldSpill(uint32_t ledger, 
uint32_t level) // spill frequency of the level below. // incoming_spill_frequency(i) = 2^(2i - 1) for i > 0 // incoming_spill_frequency(0) = 1 +template uint32_t -BucketList::bucketUpdatePeriod(uint32_t level, bool isCurr) +BucketListBase::bucketUpdatePeriod(uint32_t level, bool isCurr) { if (!isCurr) { @@ -429,26 +477,30 @@ BucketList::bucketUpdatePeriod(uint32_t level, bool isCurr) return 1u << (2 * level - 1); } +template bool -BucketList::keepDeadEntries(uint32_t level) +BucketListBase::keepTombstoneEntries(uint32_t level) { - return level < BucketList::kNumLevels - 1; + return level < BucketListBase::kNumLevels - 1; } -BucketLevel const& -BucketList::getLevel(uint32_t i) const +template +BucketLevel const& +BucketListBase::getLevel(uint32_t i) const { return mLevels.at(i); } -BucketLevel& -BucketList::getLevel(uint32_t i) +template +BucketLevel& +BucketListBase::getLevel(uint32_t i) { return mLevels.at(i); } +template void -BucketList::resolveAnyReadyFutures() +BucketListBase::resolveAnyReadyFutures() { ZoneScoped; for (auto& level : mLevels) @@ -460,8 +512,9 @@ BucketList::resolveAnyReadyFutures() } } +template bool -BucketList::futuresAllResolved(uint32_t maxLevel) const +BucketListBase::futuresAllResolved(uint32_t maxLevel) const { ZoneScoped; releaseAssert(maxLevel < mLevels.size()); @@ -476,8 +529,9 @@ BucketList::futuresAllResolved(uint32_t maxLevel) const return true; } +template uint32_t -BucketList::getMaxMergeLevel(uint32_t currLedger) const +BucketListBase::getMaxMergeLevel(uint32_t currLedger) const { uint32_t i = 0; for (; i < static_cast(mLevels.size()) - 1; ++i) @@ -490,14 +544,15 @@ BucketList::getMaxMergeLevel(uint32_t currLedger) const return i; } +template uint64_t -BucketList::getSize() const +BucketListBase::getSize() const { uint64_t sum = 0; for (auto const& lev : mLevels) { - std::array, 2> buckets = {lev.getCurr(), - lev.getSnap()}; + std::array, 2> buckets = {lev.getCurr(), + lev.getSnap()}; for (auto const& b : buckets) { if (b) @@ -511,16 +566,95 @@ BucketList::getSize() const } void -BucketList::addBatch(Application& app, uint32_t currLedger, - uint32_t currLedgerProtocol, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) +HotArchiveBucketList::addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) +{ + ZoneScoped; + releaseAssert(currLedger > 0); + releaseAssertOrThrow(protocolVersionStartsFrom( + currLedgerProtocol, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + + for (uint32_t i = static_cast(mLevels.size()) - 1; i != 0; --i) + { + if (levelShouldSpill(currLedger, i - 1)) + { + /** + * At every ledger, level[0] prepares the new batch and commits + * it. + * + * At ledger multiples of 2, level[0] snaps, level[1] commits + * existing (promotes next to curr) and "prepares" by starting a + * merge of that new level[1] curr with the new level[0] snap. This + * is "level 0 spilling". + * + * At ledger multiples of 8, level[1] snaps, level[2] commits + * existing (promotes next to curr) and "prepares" by starting a + * merge of that new level[2] curr with the new level[1] snap. This + * is "level 1 spilling". + * + * At ledger multiples of 32, level[2] snaps, level[3] commits + * existing (promotes next to curr) and "prepares" by starting a + * merge of that new level[3] curr with the new level[2] snap. This + * is "level 2 spilling". 
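+ *
+ * (An illustrative consequence of the schedule above: ledger 32 is a
+ * multiple of 2, 8, and 32 at once, so levels 0, 1, and 2 all spill on
+ * that same ledger close.)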
+ * + * All these have to be done in _reverse_ order (counting down + * levels) because we want a 'curr' to be pulled out of the way into + * a 'snap' the moment it's half-a-level full, not have anything + * else spilled/added to it. + */ + + auto snap = mLevels[i - 1].snap(); + mLevels[i].commit(); + mLevels[i].prepare(app, currLedger, currLedgerProtocol, snap, + /*shadows=*/{}, + /*countMergeEvents=*/true); + } + } + + // In some testing scenarios, we want to inhibit counting level 0 merges + // because they are not repeated when restarting merges on app startup, + // and we are checking for an expected number of merge events on restart. + bool countMergeEvents = + !app.getConfig().ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING; + bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC; + mLevels[0].prepare( + app, currLedger, currLedgerProtocol, + HotArchiveBucket::fresh(app.getBucketManager(), currLedgerProtocol, + archiveEntries, restoredEntries, deletedEntries, + countMergeEvents, app.getClock().getIOContext(), + doFsync), + /*shadows=*/{}, countMergeEvents); + mLevels[0].commit(); + + // We almost always want to try to resolve completed merges to single + // buckets, as it makes restarts less fragile: fewer saved/restored shadows, + // fewer buckets for the user to accidentally delete from their buckets + // dir. Also makes publication less likely to redo a merge that was already + // complete (but not resolved) when the snapshot gets taken. + // + // But we support the option of not-doing so, only for the sake of + // testing. Note: this is nonblocking in any case. + if (!app.getConfig().ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING) + { + resolveAnyReadyFutures(); + } +} + +void +LiveBucketList::addBatch(Application& app, uint32_t currLedger, + uint32_t currLedgerProtocol, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) { ZoneScoped; releaseAssert(currLedger > 0); - std::vector> shadows; + std::vector> shadows; for (auto& level : mLevels) { shadows.push_back(level.getCurr()); @@ -610,12 +744,13 @@ BucketList::addBatch(Application& app, uint32_t currLedger, !app.getConfig().ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING; bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC; releaseAssert(shadows.size() == 0); - mLevels[0].prepare(app, currLedger, currLedgerProtocol, - Bucket::fresh(app.getBucketManager(), currLedgerProtocol, - initEntries, liveEntries, deadEntries, - countMergeEvents, - app.getClock().getIOContext(), doFsync), - shadows, countMergeEvents); + mLevels[0].prepare( + app, currLedger, currLedgerProtocol, + LiveBucket::fresh(app.getBucketManager(), currLedgerProtocol, + initEntries, liveEntries, deadEntries, + countMergeEvents, app.getClock().getIOContext(), + doFsync), + shadows, countMergeEvents); mLevels[0].commit(); // We almost always want to try to resolve completed merges to single @@ -633,7 +768,7 @@ BucketList::addBatch(Application& app, uint32_t currLedger, } BucketEntryCounters -BucketList::sumBucketEntryCounters() const +LiveBucketList::sumBucketEntryCounters() const { BucketEntryCounters counters; for (auto const& lev : mLevels) @@ -651,9 +786,9 @@ BucketList::sumBucketEntryCounters() const } void -BucketList::updateStartingEvictionIterator(EvictionIterator& iter, - uint32_t firstScanLevel, - uint32_t ledgerSeq) +LiveBucketList::updateStartingEvictionIterator(EvictionIterator& iter, + uint32_t firstScanLevel, + uint32_t ledgerSeq) { // Check if an upgrade has changed the starting scan level to below the // current 
iterator level @@ -676,8 +811,8 @@ BucketList::updateStartingEvictionIterator(EvictionIterator& iter, { // Check if bucket received an incoming spill releaseAssert(iter.bucketListLevel != 0); - if (BucketList::levelShouldSpill(ledgerSeq - 1, - iter.bucketListLevel - 1)) + if (BucketListBase::levelShouldSpill(ledgerSeq - 1, + iter.bucketListLevel - 1)) { // If Bucket changed, reset to start of bucket iter.bucketFileOffset = 0; @@ -685,7 +820,8 @@ BucketList::updateStartingEvictionIterator(EvictionIterator& iter, } else { - if (BucketList::levelShouldSpill(ledgerSeq - 1, iter.bucketListLevel)) + if (BucketListBase::levelShouldSpill(ledgerSeq - 1, + iter.bucketListLevel)) { // If Bucket changed, reset to start of bucket iter.bucketFileOffset = 0; @@ -694,7 +830,7 @@ BucketList::updateStartingEvictionIterator(EvictionIterator& iter, } bool -BucketList::updateEvictionIterAndRecordStats( +LiveBucketList::updateEvictionIterAndRecordStats( EvictionIterator& iter, EvictionIterator startIter, uint32_t configFirstScanLevel, uint32_t ledgerSeq, std::shared_ptr stats, EvictionCounters& counters) @@ -737,10 +873,10 @@ BucketList::updateEvictionIterAndRecordStats( } void -BucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter, - uint32_t scanSize, - std::shared_ptr b, - EvictionCounters& counters) +LiveBucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter, + uint32_t scanSize, + std::shared_ptr b, + EvictionCounters& counters) { // Check to see if we can finish scanning the new bucket before it // receives an update @@ -754,61 +890,11 @@ BucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter, } } -// To avoid noisy data, only count metrics that encompass a complete -// eviction cycle. If a node joins the network mid cycle, metrics will be -// nullopt and be initialized at the start of the next cycle. +template void -BucketList::scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx, - uint32_t ledgerSeq, - EvictionCounters& counters, - std::shared_ptr stats) -{ - releaseAssert(stats); - - auto getBucketFromIter = [&levels = mLevels](EvictionIterator const& iter) { - auto& level = levels.at(iter.bucketListLevel); - return iter.isCurrBucket ? 
level.getCurr() : level.getSnap(); - }; - - auto const& networkConfig = - app.getLedgerManager().getSorobanNetworkConfig(); - auto const firstScanLevel = - networkConfig.stateArchivalSettings().startingEvictionScanLevel; - auto evictionIter = networkConfig.evictionIterator(); - auto scanSize = networkConfig.stateArchivalSettings().evictionScanSize; - auto maxEntriesToEvict = - networkConfig.stateArchivalSettings().maxEntriesToArchive; - - updateStartingEvictionIterator(evictionIter, firstScanLevel, ledgerSeq); - - auto startIter = evictionIter; - auto b = getBucketFromIter(evictionIter); - - while (!b->scanForEvictionLegacy( - ltx, evictionIter, scanSize, maxEntriesToEvict, ledgerSeq, - counters.entriesEvicted, counters.bytesScannedForEviction, stats)) - { - - if (updateEvictionIterAndRecordStats(evictionIter, startIter, - firstScanLevel, ledgerSeq, stats, - counters)) - { - break; - } - - b = getBucketFromIter(evictionIter); - checkIfEvictionScanIsStuck( - evictionIter, - networkConfig.stateArchivalSettings().evictionScanSize, b, - counters); - } - - networkConfig.updateEvictionIterator(ltx, evictionIter); -} - -void -BucketList::restartMerges(Application& app, uint32_t maxProtocolVersion, - uint32_t ledger) +BucketListBase::restartMerges(Application& app, + uint32_t maxProtocolVersion, + uint32_t ledger) { ZoneScoped; for (uint32_t i = 0; i < static_cast(mLevels.size()); i++) @@ -856,9 +942,9 @@ BucketList::restartMerges(Application& app, uint32_t maxProtocolVersion, return; } - auto version = Bucket::getBucketVersion(snap); - if (protocolVersionIsBefore(version, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + auto version = snap->getBucketVersion(); + if (protocolVersionIsBefore( + version, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { auto msg = fmt::format( FMT_STRING("Invalid state: bucketlist level {:d} has clear " @@ -870,7 +956,7 @@ BucketList::restartMerges(Application& app, uint32_t maxProtocolVersion, // Round down the current ledger to when the merge was started, and // re-start the merge via prepare, mimicking the logic in `addBatch` auto mergeStartLedger = - roundDown(ledger, BucketList::levelHalf(i - 1)); + roundDown(ledger, BucketListBase::levelHalf(i - 1)); level.prepare( app, mergeStartLedger, version, snap, /* shadows= */ {}, !app.getConfig().ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING); @@ -878,13 +964,16 @@ BucketList::restartMerges(Application& app, uint32_t maxProtocolVersion, } } -BucketListDepth BucketList::kNumLevels = 11; - -BucketList::BucketList() +template BucketListBase::BucketListBase() { for (uint32_t i = 0; i < kNumLevels; ++i) { - mLevels.push_back(BucketLevel(i)); + mLevels.push_back(BucketLevel(i)); } } + +template class BucketListBase; +template class BucketListBase; +template class BucketLevel; +template class BucketLevel; } diff --git a/src/bucket/BucketList.h b/src/bucket/BucketList.h index 09549ac1ad..7344ac9f36 100644 --- a/src/bucket/BucketList.h +++ b/src/bucket/BucketList.h @@ -352,36 +352,39 @@ struct InflationWinner; namespace testutil { -class BucketListDepthModifier; +template class BucketListDepthModifier; } -class BucketLevel +template class BucketLevel { + static_assert(std::is_same_v || + std::is_same_v); + uint32_t mLevel; - FutureBucket mNextCurr; - std::shared_ptr mCurr; - std::shared_ptr mSnap; + FutureBucket mNextCurr; + std::shared_ptr mCurr; + std::shared_ptr mSnap; public: BucketLevel(uint32_t i); uint256 getHash() const; - FutureBucket const& getNext() const; - FutureBucket& getNext(); - std::shared_ptr getCurr() const; - 
std::shared_ptr<Bucket> getSnap() const;
-    void setNext(FutureBucket const& fb);
-    void setCurr(std::shared_ptr<Bucket>);
-    void setSnap(std::shared_ptr<Bucket>);
+    FutureBucket<BucketT> const& getNext() const;
+    FutureBucket<BucketT>& getNext();
+    std::shared_ptr<BucketT> getCurr() const;
+    std::shared_ptr<BucketT> getSnap() const;
+    void setNext(FutureBucket<BucketT> const& fb);
+    void setCurr(std::shared_ptr<BucketT>);
+    void setSnap(std::shared_ptr<BucketT>);
     void commit();
     void prepare(Application& app, uint32_t currLedger,
-                 uint32_t currLedgerProtocol, std::shared_ptr<Bucket> snap,
-                 std::vector<std::shared_ptr<Bucket>> const& shadows,
+                 uint32_t currLedgerProtocol, std::shared_ptr<BucketT> snap,
+                 std::vector<std::shared_ptr<BucketT>> const& shadows,
                  bool countMergeEvents);
-    std::shared_ptr<Bucket> snap();
+    std::shared_ptr<BucketT> snap();
 };
 
 // NOTE: The access specifications for this class have been carefully chosen to
-// make it so BucketList::kNumLevels can only be modified from
+// make it so LiveBucketList::kNumLevels can only be modified from
 // BucketListDepthModifier -- not even BucketList can modify it. Please
 // use care when modifying this class.
 class BucketListDepth
@@ -395,14 +398,27 @@ class BucketListDepth
 
     operator uint32_t() const;
 
-    friend class testutil::BucketListDepthModifier;
+    template <class BucketT> friend class testutil::BucketListDepthModifier;
 };
 
-class BucketList
+// While every BucketList shares the same high-level structure with respect to
+// spill schedules, merges at the bucket level, etc., each BucketList type
+// holds different types of entries and has different merge logic at the
+// individual entry level. This pure virtual base class defines the shared
+// structure of all BucketLists. It must be extended for each specific
+// BucketList type, where the template parameter BucketT refers to the
+// underlying Bucket type.
+template <class BucketT> class BucketListBase
 {
-    std::vector<BucketLevel> mLevels;
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
+  protected:
+    std::vector<BucketLevel<BucketT>> mLevels;
 
   public:
+    // Trivial pure virtual destructor to make this an abstract class
+    virtual ~BucketListBase() = 0;
+
     // Number of bucket levels in the bucketlist. Every bucketlist in the system
     // will have this many levels and it effectively gets wired-in to the
     // protocol. Careful about changing it.
@@ -436,50 +452,30 @@ class BucketList
     // should spill curr->snap and start merging snap into its next level.
     static bool levelShouldSpill(uint32_t ledger, uint32_t level);
 
-    // Returns true if at given `level` dead entries should be kept.
-    static bool keepDeadEntries(uint32_t level);
+    // Returns true if at given `level` tombstone entries should be kept. A
+    // "tombstone" entry is the entry type that represents null in the given
+    // BucketList. For LiveBucketList, this is DEADENTRY. For
+    // HotArchiveBucketList, HOT_ARCHIVE_LIVE.
+    static bool keepTombstoneEntries(uint32_t level);
 
     // Number of ledgers it takes a bucket to spill/receive an incoming spill
     static uint32_t bucketUpdatePeriod(uint32_t level, bool isCurr);
 
     // Create a new BucketList with `kNumLevels` levels, each with
     // an empty bucket in `curr` and `snap`.
-    BucketList();
+    BucketListBase();
 
     // Return level `i` of the BucketList.
-    BucketLevel const& getLevel(uint32_t i) const;
+    BucketLevel<BucketT> const& getLevel(uint32_t i) const;
 
     // Return level `i` of the BucketList.
-    BucketLevel& getLevel(uint32_t i);
+    BucketLevel<BucketT>& getLevel(uint32_t i);
 
     // Return a cumulative hash of the entire bucketlist; this is the hash of
     // the concatenation of each level's hash, each of which in turn is the hash
     // of the concatenation of the hashes of the `curr` and `snap` buckets.
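+    //
+    // Sketch of the resulting hash structure (illustrative):
+    //   listHash = SHA256(levelHash(0) || ... || levelHash(kNumLevels - 1))
+    //   levelHash(i) = SHA256(hash(curr(i)) || hash(snap(i)))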
    Hash getHash() const;
 
-    // Reset Eviction Iterator position if an incoming spill or upgrade has
-    // invalidated the previous position
-    static void updateStartingEvictionIterator(EvictionIterator& iter,
-                                               uint32_t firstScanLevel,
-                                               uint32_t ledgerSeq);
-
-    // Update eviction iter and record stats after scanning a region in one
-    // bucket. Returns true if scan has looped back to startIter, false
-    // otherwise.
-    static bool updateEvictionIterAndRecordStats(
-        EvictionIterator& iter, EvictionIterator startIter,
-        uint32_t configFirstScanLevel, uint32_t ledgerSeq,
-        std::shared_ptr<EvictionStatistics> stats, EvictionCounters& counters);
-
-    static void checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter,
-                                           uint32_t scanSize,
-                                           std::shared_ptr<Bucket const> b,
-                                           EvictionCounters& counters);
-
-    void scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx,
-                               uint32_t ledgerSeq, EvictionCounters& counters,
-                               std::shared_ptr<EvictionStatistics> stats);
-
     // Restart any merges that might be running on background worker threads,
     // merging buckets between levels. This needs to be called after forcing a
     // BucketList to adopt a new state, either at application restart or when
@@ -511,6 +507,34 @@ class BucketList
     // Returns the total size of the BucketList, in bytes, excluding all
     // FutureBuckets
     uint64_t getSize() const;
+};
+
+// The LiveBucketList stores the current canonical state of the ledger. It is
+// made up of LiveBucket buckets, which in turn store individual entries of type
+// BucketEntry. When an entry is "evicted" from the ledger, it is removed from
+// the LiveBucketList. Depending on the evicted entry type, it may then be added
+// to the HotArchiveBucketList.
+class LiveBucketList : public BucketListBase<LiveBucket>
+{
+  public:
+    // Reset Eviction Iterator position if an incoming spill or upgrade has
+    // invalidated the previous position
+    static void updateStartingEvictionIterator(EvictionIterator& iter,
+                                               uint32_t firstScanLevel,
+                                               uint32_t ledgerSeq);
+
+    // Update eviction iter and record stats after scanning a region in one
+    // bucket. Returns true if scan has looped back to startIter, false
+    // otherwise.
+    static bool updateEvictionIterAndRecordStats(
+        EvictionIterator& iter, EvictionIterator startIter,
+        uint32_t configFirstScanLevel, uint32_t ledgerSeq,
+        std::shared_ptr<EvictionStatistics> stats, EvictionCounters& counters);
+
+    static void checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter,
+                                           uint32_t scanSize,
+                                           std::shared_ptr<LiveBucket const> b,
+                                           EvictionCounters& counters);
 
     // Add a batch of initial (created), live (updated) and dead entries to the
     // bucketlist, representing the entries effected by closing
@@ -524,6 +548,27 @@ class BucketList
                   std::vector<LedgerEntry> const& initEntries,
                   std::vector<LedgerEntry> const& liveEntries,
                   std::vector<LedgerKey> const& deadEntries);
+    BucketEntryCounters sumBucketEntryCounters() const;
 };
+
+// The HotArchiveBucketList stores recently evicted entries. It contains Buckets
+// of type HotArchiveBucket, which store individual entries of type
+// HotArchiveBucketEntry.
+class HotArchiveBucketList : public BucketListBase<HotArchiveBucket>
+{
+  private:
+    // For now, this class is identical to LiveBucketList. Later PRs will add
+    // additional functionality.
+
+    // Merge result future
+    // This should be the result of merging this entire list into a single file.
+    // The MerkleBucketList is then initialized with this result.
+  public:
+    void addBatch(Application& app, uint32_t currLedger,
+                  uint32_t currLedgerProtocol,
+                  std::vector<LedgerEntry> const& archiveEntries,
+                  std::vector<LedgerKey> const& restoredEntries,
+                  std::vector<LedgerKey> const& deletedEntries);
+};
 }
diff --git a/src/bucket/BucketListSnapshot.cpp b/src/bucket/BucketListSnapshot.cpp
index 5d26fd8296..ae2c0a8737 100644
--- a/src/bucket/BucketListSnapshot.cpp
+++ b/src/bucket/BucketListSnapshot.cpp
@@ -3,59 +3,74 @@
 #include "bucket/BucketListSnapshot.h"
+#include "bucket/Bucket.h"
 #include "bucket/BucketInputIterator.h"
+#include "bucket/BucketList.h"
+#include "bucket/BucketSnapshot.h"
 #include "crypto/SecretKey.h" // IWYU pragma: keep
 #include "ledger/LedgerTxn.h"
 #include "medida/timer.h"
 #include "util/GlobalChecks.h"
+#include
+#include
 
 namespace stellar
 {
-
-BucketListSnapshot::BucketListSnapshot(BucketList const& bl,
-                                       LedgerHeader header)
+template <class BucketT>
+BucketListSnapshot<BucketT>::BucketListSnapshot(
+    BucketListBase<BucketT> const& bl, LedgerHeader header)
     : mHeader(std::move(header))
 {
     releaseAssert(threadIsMain());
 
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < BucketListBase<BucketT>::kNumLevels; ++i)
     {
         auto const& level = bl.getLevel(i);
-        mLevels.emplace_back(BucketLevelSnapshot(level));
+        mLevels.emplace_back(BucketLevelSnapshot<BucketT>(level));
     }
 }
 
-BucketListSnapshot::BucketListSnapshot(BucketListSnapshot const& snapshot)
+template <class BucketT>
+BucketListSnapshot<BucketT>::BucketListSnapshot(
+    BucketListSnapshot<BucketT> const& snapshot)
     : mLevels(snapshot.mLevels), mHeader(snapshot.mHeader)
 {
 }
 
-std::vector<BucketLevelSnapshot> const&
-BucketListSnapshot::getLevels() const
+template <class BucketT>
+std::vector<BucketLevelSnapshot<BucketT>> const&
+BucketListSnapshot<BucketT>::getLevels() const
 {
     return mLevels;
 }
 
+template <class BucketT>
 uint32_t
-BucketListSnapshot::getLedgerSeq() const
+BucketListSnapshot<BucketT>::getLedgerSeq() const
 {
     return mHeader.ledgerSeq;
 }
 
-// Loops through all buckets in the given snapshot, starting with curr at level
-// 0, then snap at level 0, etc. Calls f on each bucket. Exits early if function
-// returns true
-namespace
+template <class BucketT>
+LedgerHeader const&
+SearchableBucketListSnapshotBase<BucketT>::getLedgerHeader()
 {
+    releaseAssert(mSnapshot);
+    mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
+    return mSnapshot->getLedgerHeader();
+}
+
+template <class BucketT>
 void
-loopAllBuckets(std::function<bool(BucketSnapshot const&)> f,
-               BucketListSnapshot const& snapshot)
+SearchableBucketListSnapshotBase<BucketT>::loopAllBuckets(
+    std::function<bool(BucketSnapshotT const&)> f,
+    BucketListSnapshot<BucketT> const& snapshot) const
 {
     for (auto const& lev : snapshot.getLevels())
     {
         // Return true if we should exit loop early
-        auto processBucket = [f](BucketSnapshot const& b) {
+        auto processBucket = [f](BucketSnapshotT const& b) {
             if (b.isEmpty())
             {
                 return false;
@@ -71,87 +86,23 @@ loopAllBuckets(std::function<bool(BucketSnapshot const&)> f,
     }
 }
 
-// Loads bucket entry for LedgerKey k. Returns <LedgerEntry, bloomMiss>,
-// where bloomMiss is true if a bloom miss occurred during the load.
-std::pair<std::shared_ptr<LedgerEntry>, bool>
-getLedgerEntryInternal(LedgerKey const& k, BucketListSnapshot const& snapshot)
-{
-    std::shared_ptr<LedgerEntry> result{};
-    auto sawBloomMiss = false;
-
-    auto f = [&](BucketSnapshot const& b) {
-        auto [be, bloomMiss] = b.getBucketEntry(k);
-        sawBloomMiss = sawBloomMiss || bloomMiss;
-
-        if (be.has_value())
-        {
-            result =
-                be.value().type() == DEADENTRY
-                    ?
nullptr - : std::make_shared(be.value().liveEntry()); - return true; - } - else - { - return false; - } - }; - - loopAllBuckets(f, snapshot); - return {result, sawBloomMiss}; -} - -std::vector -loadKeysInternal(std::set const& inKeys, - BucketListSnapshot const& snapshot, LedgerKeyMeter* lkMeter) -{ - std::vector entries; - - // Make a copy of the key set, this loop is destructive - auto keys = inKeys; - auto f = [&](BucketSnapshot const& b) { - b.loadKeysWithLimits(keys, entries, lkMeter); - return keys.empty(); - }; - - loopAllBuckets(f, snapshot); - return entries; -} - -} - -uint32_t -SearchableBucketListSnapshot::getLedgerSeq() const -{ - releaseAssert(mSnapshot); - return mSnapshot->getLedgerSeq(); -} - -LedgerHeader const& -SearchableBucketListSnapshot::getLedgerHeader() -{ - releaseAssert(mSnapshot); - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - return mSnapshot->getLedgerHeader(); -} - EvictionResult -SearchableBucketListSnapshot::scanForEviction( +SearchableLiveBucketListSnapshot::scanForEviction( uint32_t ledgerSeq, EvictionCounters& counters, EvictionIterator evictionIter, std::shared_ptr stats, - StateArchivalSettings const& sas) + StateArchivalSettings const& sas, uint32_t ledgerVers) { releaseAssert(mSnapshot); releaseAssert(stats); auto getBucketFromIter = [&levels = mSnapshot->getLevels()]( - EvictionIterator const& iter) -> BucketSnapshot const& { + EvictionIterator const& iter) -> LiveBucketSnapshot const& { auto& level = levels.at(iter.bucketListLevel); return iter.isCurrBucket ? level.curr : level.snap; }; - BucketList::updateStartingEvictionIterator( + LiveBucketList::updateStartingEvictionIterator( evictionIter, sas.startingEvictionScanLevel, ledgerSeq); EvictionResult result(sas); @@ -161,18 +112,18 @@ SearchableBucketListSnapshot::scanForEviction( for (;;) { auto const& b = getBucketFromIter(evictionIter); - BucketList::checkIfEvictionScanIsStuck( + LiveBucketList::checkIfEvictionScanIsStuck( evictionIter, sas.evictionScanSize, b.getRawBucket(), counters); // If we scan scanSize before hitting bucket EOF, exit early if (b.scanForEviction(evictionIter, scanSize, ledgerSeq, - result.eligibleKeys, *this)) + result.eligibleEntries, *this, ledgerVers)) { break; } // If we return back to the Bucket we started at, exit - if (BucketList::updateEvictionIterAndRecordStats( + if (LiveBucketList::updateEvictionIterAndRecordStats( evictionIter, startIter, sas.startingEvictionScanLevel, ledgerSeq, stats, counters)) { @@ -185,71 +136,113 @@ SearchableBucketListSnapshot::scanForEviction( return result; } -std::shared_ptr -SearchableBucketListSnapshot::load(LedgerKey const& k) +std::vector +SearchableLiveBucketListSnapshot::loadKeysWithLimits( + std::set const& inKeys, + LedgerKeyMeter* lkMeter) { ZoneScoped; - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - releaseAssert(mSnapshot); + // Make a copy of the key set, this loop is destructive + auto keys = inKeys; + std::vector entries; + auto loadKeysLoop = [&](auto const& b) { + b.loadKeys(keys, entries, lkMeter); + return keys.empty(); + }; + + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); if (threadIsMain()) { - mSnapshotManager.startPointLoadTimer(); - auto [result, bloomMiss] = getLedgerEntryInternal(k, *mSnapshot); - mSnapshotManager.endPointLoadTimer(k.type(), bloomMiss); - return result; + auto timer = + mSnapshotManager.recordBulkLoadMetrics("prefetch", inKeys.size()) + .TimeScope(); + loopAllBuckets(loadKeysLoop, *mSnapshot); } else { - auto 
[result, bloomMiss] = getLedgerEntryInternal(k, *mSnapshot); - return result; + // TODO: Background metrics + loopAllBuckets(loadKeysLoop, *mSnapshot); } + + return entries; } -std::pair, bool> -SearchableBucketListSnapshot::loadKeysFromLedger( +std::optional> +SearchableLiveBucketListSnapshot::loadKeysFromLedger( std::set const& inKeys, uint32_t ledgerSeq) { ZoneScoped; + + // Make a copy of the key set, this loop is destructive + auto keys = inKeys; + std::vector entries; + auto loadKeysLoop = [&](auto const& b) { + b.loadKeys(keys, entries, /*lkMeter=*/nullptr); + return keys.empty(); + }; + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - releaseAssert(mSnapshot); if (ledgerSeq == mSnapshot->getLedgerSeq()) { - auto result = loadKeysInternal(inKeys, *mSnapshot, /*lkMeter=*/nullptr); - return {result, true}; + loopAllBuckets(loadKeysLoop, *mSnapshot); } - - auto iter = mHistoricalSnapshots.find(ledgerSeq); - if (iter == mHistoricalSnapshots.end()) + else { - return {{}, false}; + auto iter = mHistoricalSnapshots.find(ledgerSeq); + if (iter == mHistoricalSnapshots.end()) + { + return std::nullopt; + } + + releaseAssert(iter->second); + loopAllBuckets(loadKeysLoop, *iter->second); } - releaseAssert(iter->second); - auto result = loadKeysInternal(inKeys, *iter->second, /*lkMeter=*/nullptr); - return {result, true}; + return entries; } -std::vector -SearchableBucketListSnapshot::loadKeysWithLimits( - std::set const& inKeys, - LedgerKeyMeter* lkMeter) +std::shared_ptr +SearchableLiveBucketListSnapshot::load(LedgerKey const& k) { ZoneScoped; - mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); - releaseAssert(mSnapshot); + std::shared_ptr result{}; + auto sawBloomMiss = false; + + // Search function called on each Bucket in BucketList until we find the key + auto loadKeyBucketLoop = [&](auto const& b) { + auto [be, bloomMiss] = b.getBucketEntry(k); + sawBloomMiss = sawBloomMiss || bloomMiss; + + if (be) + { + result = LiveBucket::isTombstoneEntry(*be) + ? nullptr + : std::make_shared(be->liveEntry()); + + return true; + } + else + { + return false; + } + }; + + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); if (threadIsMain()) { - auto timer = - mSnapshotManager.recordBulkLoadMetrics("prefetch", inKeys.size()) - .TimeScope(); - return loadKeysInternal(inKeys, *mSnapshot, lkMeter); + mSnapshotManager.startPointLoadTimer(); + loopAllBuckets(loadKeyBucketLoop, *mSnapshot); + mSnapshotManager.endPointLoadTimer(k.type(), sawBloomMiss); + return result; } else { - return loadKeysInternal(inKeys, *mSnapshot, lkMeter); + // TODO: Background metrics + loopAllBuckets(loadKeyBucketLoop, *mSnapshot); + return result; } } @@ -259,7 +252,7 @@ SearchableBucketListSnapshot::loadKeysWithLimits( // 2. 
Perform a bulk lookup for all possible trustline keys, that is, all // trustlines with the given accountID and poolID from step 1 std::vector -SearchableBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset( +SearchableLiveBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset( AccountID const& accountID, Asset const& asset) { ZoneScoped; @@ -271,7 +264,8 @@ SearchableBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset( LedgerKeySet trustlinesToLoad; - auto trustLineLoop = [&](BucketSnapshot const& b) { + auto trustLineLoop = [&](auto const& rawB) { + auto const& b = static_cast(rawB); for (auto const& poolID : b.getPoolIDsByAsset(asset)) { LedgerKey trustlineKey(TRUSTLINE); @@ -290,12 +284,20 @@ SearchableBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset( .recordBulkLoadMetrics("poolshareTrustlines", trustlinesToLoad.size()) .TimeScope(); - return loadKeysInternal(trustlinesToLoad, *mSnapshot, nullptr); + + std::vector result; + auto loadKeysLoop = [&](auto const& b) { + b.loadKeys(trustlinesToLoad, result, /*lkMeter=*/nullptr); + return trustlinesToLoad.empty(); + }; + + loopAllBuckets(loadKeysLoop, *mSnapshot); + return result; } std::vector -SearchableBucketListSnapshot::loadInflationWinners(size_t maxWinners, - int64_t minBalance) +SearchableLiveBucketListSnapshot::loadInflationWinners(size_t maxWinners, + int64_t minBalance) { ZoneScoped; mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); @@ -310,8 +312,8 @@ SearchableBucketListSnapshot::loadInflationWinners(size_t maxWinners, UnorderedMap voteCount; UnorderedSet seen; - auto countVotesInBucket = [&](BucketSnapshot const& b) { - for (BucketInputIterator in(b.getRawBucket()); in; ++in) + auto countVotesInBucket = [&](LiveBucketSnapshot const& b) { + for (LiveBucketInputIterator in(b.getRawBucket()); in; ++in) { BucketEntry const& be = *in; if (be.type() == DEADENTRY) @@ -386,17 +388,114 @@ SearchableBucketListSnapshot::loadInflationWinners(size_t maxWinners, return winners; } -BucketLevelSnapshot::BucketLevelSnapshot(BucketLevel const& level) +template +BucketLevelSnapshot::BucketLevelSnapshot( + BucketLevel const& level) : curr(level.getCurr()), snap(level.getSnap()) { } -SearchableBucketListSnapshot::SearchableBucketListSnapshot( +template +SearchableBucketListSnapshotBase::SearchableBucketListSnapshotBase( BucketSnapshotManager const& snapshotManager) : mSnapshotManager(snapshotManager), mHistoricalSnapshots() { - // Initialize snapshot from SnapshotManager + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); } +template +SearchableBucketListSnapshotBase::~SearchableBucketListSnapshotBase() +{ +} + +SearchableLiveBucketListSnapshot::SearchableLiveBucketListSnapshot( + BucketSnapshotManager const& snapshotManager) + : SearchableBucketListSnapshotBase(snapshotManager) +{ +} + +SearchableHotArchiveBucketListSnapshot::SearchableHotArchiveBucketListSnapshot( + BucketSnapshotManager const& snapshotManager) + : SearchableBucketListSnapshotBase(snapshotManager) +{ +} + +std::shared_ptr +SearchableHotArchiveBucketListSnapshot::load(LedgerKey const& k) +{ + ZoneScoped; + + // Search function called on each Bucket in BucketList until we find the key + std::shared_ptr result{}; + auto loadKeyBucketLoop = [&](auto const& b) { + auto [be, _] = b.getBucketEntry(k); + + if (be) + { + result = HotArchiveBucket::isTombstoneEntry(*be) ? 
nullptr : be; + return true; + } + else + { + return false; + } + }; + + // TODO: Metrics + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); + loopAllBuckets(loadKeyBucketLoop, *mSnapshot); + return result; +} + +std::vector +SearchableHotArchiveBucketListSnapshot::loadKeys( + std::set const& inKeys) +{ + auto op = loadKeysFromLedger(inKeys, getLedgerSeq()); + releaseAssertOrThrow(op); + return std::move(*op); +} + +std::optional> +SearchableHotArchiveBucketListSnapshot::loadKeysFromLedger( + std::set const& inKeys, uint32_t ledgerSeq) +{ + ZoneScoped; + std::vector entries; + + // Make a copy of the key set, this loop is destructive + auto keys = inKeys; + auto loadKeysLoop = [&](auto const& b) { + b.loadKeys(keys, entries, /*lkMeter=*/nullptr); + return keys.empty(); + }; + + mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots); + + if (ledgerSeq == mSnapshot->getLedgerSeq()) + { + loopAllBuckets(loadKeysLoop, *mSnapshot); + } + else + { + auto iter = mHistoricalSnapshots.find(ledgerSeq); + if (iter == mHistoricalSnapshots.end()) + { + return std::nullopt; + } + + releaseAssert(iter->second); + loopAllBuckets(loadKeysLoop, *iter->second); + } + + return entries; +} + +template struct BucketLevelSnapshot; +template struct BucketLevelSnapshot; +template class BucketListSnapshot; +template class BucketListSnapshot; +template class SearchableBucketListSnapshotBase; +template class SearchableBucketListSnapshotBase; } \ No newline at end of file diff --git a/src/bucket/BucketListSnapshot.h b/src/bucket/BucketListSnapshot.h index ea14869f3a..0156679461 100644 --- a/src/bucket/BucketListSnapshot.h +++ b/src/bucket/BucketListSnapshot.h @@ -4,6 +4,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/Bucket.h" #include "bucket/BucketList.h" #include "bucket/BucketManagerImpl.h" #include "bucket/BucketSnapshot.h" @@ -17,30 +18,43 @@ class Timer; namespace stellar { -struct BucketLevelSnapshot +template struct BucketLevelSnapshot { - BucketSnapshot curr; - BucketSnapshot snap; + static_assert(std::is_same_v || + std::is_same_v); - BucketLevelSnapshot(BucketLevel const& level); + using BucketSnapshotT = + std::conditional_t, + LiveBucketSnapshot, HotArchiveBucketSnapshot>; + + BucketSnapshotT curr; + BucketSnapshotT snap; + + BucketLevelSnapshot(BucketLevel const& level); }; -class BucketListSnapshot : public NonMovable +template class BucketListSnapshot : public NonMovable { + static_assert(std::is_same_v || + std::is_same_v); + using BucketSnapshotT = + std::conditional_t, + LiveBucketSnapshot, HotArchiveBucketSnapshot>; + private: - std::vector mLevels; + std::vector> mLevels; // LedgerHeader associated with this ledger state snapshot LedgerHeader const mHeader; public: - BucketListSnapshot(BucketList const& bl, LedgerHeader hhe); + BucketListSnapshot(BucketListBase const& bl, LedgerHeader hhe); // Only allow copies via constructor BucketListSnapshot(BucketListSnapshot const& snapshot); BucketListSnapshot& operator=(BucketListSnapshot const&) = delete; - std::vector const& getLevels() const; + std::vector> const& getLevels() const; uint32_t getLedgerSeq() const; LedgerHeader const& getLedgerHeader() const @@ -58,21 +72,54 @@ class BucketListSnapshot : public NonMovable // instance will check that the current snapshot is up to date via the // BucketListSnapshotManager and will be refreshed accordingly. 
Callers can // assume SearchableBucketListSnapshot is always up to date. -class SearchableBucketListSnapshot : public NonMovableOrCopyable +template +class SearchableBucketListSnapshotBase : public NonMovableOrCopyable { + static_assert(std::is_same_v || + std::is_same_v); + + using BucketSnapshotT = + std::conditional_t, + LiveBucketSnapshot, HotArchiveBucketSnapshot>; + + protected: + virtual ~SearchableBucketListSnapshotBase() = 0; + BucketSnapshotManager const& mSnapshotManager; // Snapshot managed by SnapshotManager - std::unique_ptr mSnapshot{}; - std::map> + std::unique_ptr const> mSnapshot{}; + std::map const>> mHistoricalSnapshots; - SearchableBucketListSnapshot(BucketSnapshotManager const& snapshotManager); + // Loops through all buckets, starting with curr at level 0, then snap at + // level 0, etc. Calls f on each bucket. Exits early if function + // returns true + void loopAllBuckets(std::function f, + BucketListSnapshot const& snapshot) const; + + SearchableBucketListSnapshotBase( + BucketSnapshotManager const& snapshotManager); + + public: + uint32_t + getLedgerSeq() const + { + return mSnapshot->getLedgerSeq(); + } + + LedgerHeader const& getLedgerHeader(); +}; - friend std::shared_ptr - BucketSnapshotManager::copySearchableBucketListSnapshot() const; +class SearchableLiveBucketListSnapshot + : public SearchableBucketListSnapshotBase +{ + SearchableLiveBucketListSnapshot( + BucketSnapshotManager const& snapshotManager); public: + std::shared_ptr load(LedgerKey const& k); + std::vector loadKeysWithLimits(std::set const& inKeys, LedgerKeyMeter* lkMeter = nullptr); @@ -84,15 +131,13 @@ class SearchableBucketListSnapshot : public NonMovableOrCopyable std::vector loadInflationWinners(size_t maxWinners, int64_t minBalance); - std::shared_ptr load(LedgerKey const& k); - // Loads inKeys from the specified historical snapshot. Returns - // if the snapshot for the given ledger is - // available, otherwise. Note that ledgerSeq is defined + // load_result_vec if the snapshot for the given ledger is + // available, std::nullopt otherwise. Note that ledgerSeq is defined // as the state of the BucketList at the beginning of the ledger. This means // that for ledger N, the maximum lastModifiedLedgerSeq of any LedgerEntry // in the BucketList is N - 1. - std::pair, bool> + std::optional> loadKeysFromLedger(std::set const& inKeys, uint32_t ledgerSeq); @@ -100,8 +145,36 @@ class SearchableBucketListSnapshot : public NonMovableOrCopyable EvictionCounters& counters, EvictionIterator evictionIter, std::shared_ptr stats, - StateArchivalSettings const& sas); - uint32_t getLedgerSeq() const; - LedgerHeader const& getLedgerHeader(); + StateArchivalSettings const& sas, + uint32_t ledgerVers); + + friend std::shared_ptr + BucketSnapshotManager::copySearchableLiveBucketListSnapshot() const; +}; + +class SearchableHotArchiveBucketListSnapshot + : public SearchableBucketListSnapshotBase +{ + SearchableHotArchiveBucketListSnapshot( + BucketSnapshotManager const& snapshotManager); + + public: + std::shared_ptr load(LedgerKey const& k); + + std::vector + loadKeys(std::set const& inKeys); + + // Loads inKeys from the specified historical snapshot. Returns + // load_result_vec if the snapshot for the given ledger is + // available, std::nullopt otherwise. Note that ledgerSeq is defined + // as the state of the BucketList at the beginning of the ledger. This means + // that for ledger N, the maximum lastModifiedLedgerSeq of any LedgerEntry + // in the BucketList is N - 1. 
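+ //
+ // For example (illustrative): an entry last modified at ledger N first
+ // appears in the snapshot queried with ledgerSeq == N + 1, the snapshot
+ // taken after that ledger closed.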
+ std::optional> + loadKeysFromLedger(std::set const& inKeys, + uint32_t ledgerSeq); + + friend std::shared_ptr + BucketSnapshotManager::copySearchableHotArchiveBucketListSnapshot() const; }; } \ No newline at end of file diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h index a64bd9181f..d268140e7e 100644 --- a/src/bucket/BucketManager.h +++ b/src/bucket/BucketManager.h @@ -26,11 +26,13 @@ namespace stellar class AbstractLedgerTxn; class Application; class BasicWork; -class BucketList; +class LiveBucketList; +class HotArchiveBucketList; class BucketSnapshotManager; class Config; -class SearchableBucketListSnapshot; class TmpDirManager; +class SearchableHotArchiveBucketListSnapshot; +class SearchableLiveBucketListSnapshot; struct HistoryArchiveState; struct InflationWinner; struct LedgerHeader; @@ -84,22 +86,22 @@ struct MergeCounters // iterator as if that key was the last entry evicted struct EvictionResultEntry { - LedgerKey key; + LedgerEntry entry; EvictionIterator iter; uint32_t liveUntilLedger; - EvictionResultEntry(LedgerKey const& key, EvictionIterator const& iter, + EvictionResultEntry(LedgerEntry const& entry, EvictionIterator const& iter, uint32_t liveUntilLedger) - : key(key), iter(iter), liveUntilLedger(liveUntilLedger) + : entry(entry), iter(iter), liveUntilLedger(liveUntilLedger) { } }; struct EvictionResult { - // List of keys eligible for eviction in the order in which they occur in + // List of entries eligible for eviction in the order in which they occur in // the bucket - std::list eligibleKeys{}; + std::list eligibleEntries{}; // Eviction iterator at the end of the scan region EvictionIterator endOfRegionIterator; @@ -192,7 +194,8 @@ class BucketManager : NonMovableOrCopyable virtual std::string const& getTmpDir() = 0; virtual TmpDirManager& getTmpDirManager() = 0; virtual std::string const& getBucketDir() const = 0; - virtual BucketList& getBucketList() = 0; + virtual LiveBucketList& getLiveBucketList() = 0; + virtual HotArchiveBucketList& getHotArchiveBucketList() = 0; virtual BucketSnapshotManager& getBucketSnapshotManager() const = 0; virtual bool renameBucketDirFile(std::filesystem::path const& src, std::filesystem::path const& dst) = 0; @@ -215,12 +218,16 @@ class BucketManager : NonMovableOrCopyable // This method is mostly-threadsafe -- assuming you don't destruct the // BucketManager mid-call -- and is intended to be called from both main and // worker threads. Very carefully. - virtual std::shared_ptr - adoptFileAsBucket(std::string const& filename, uint256 const& hash, - MergeKey* mergeKey, - std::unique_ptr index) = 0; - - // Companion method to `adoptFileAsBucket` also called from the + virtual std::shared_ptr + adoptFileAsLiveBucket(std::string const& filename, uint256 const& hash, + MergeKey* mergeKey, + std::unique_ptr index) = 0; + virtual std::shared_ptr + adoptFileAsHotArchiveBucket(std::string const& filename, + uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index) = 0; + + // Companion method to `adoptFileAsLiveBucket` also called from the // `BucketOutputIterator::getBucket` merge-completion path. This method // however should be called when the output bucket is _empty_ and thereby // doesn't correspond to a file on disk; the method forgets about the @@ -233,15 +240,20 @@ class BucketManager : NonMovableOrCopyable virtual std::shared_ptr getBucketIfExists(uint256 const& hash) = 0; // Return a bucket by hash if we have it, else return nullptr. 
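+ // (The all-zero hash is special-cased as the empty bucket: lookups for it
+ // construct an empty bucket rather than returning nullptr; see
+ // BucketManagerImpl::getBucketByHash.)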
-    virtual std::shared_ptr<Bucket> getBucketByHash(uint256 const& hash) = 0;
+    virtual std::shared_ptr<LiveBucket>
+    getLiveBucketByHash(uint256 const& hash) = 0;
+    virtual std::shared_ptr<HotArchiveBucket>
+    getHotArchiveBucketByHash(uint256 const& hash) = 0;
 
     // Get a reference to a merge-future that's either running (or finished
     // somewhat recently) from either a map of the std::shared_futures doing the
     // merges and/or a set of records mapping merge inputs to outputs and the
     // set of outputs held in the BucketManager. Returns an invalid future if no
     // such future can be found or synthesized.
-    virtual std::shared_future<std::shared_ptr<Bucket>>
-    getMergeFuture(MergeKey const& key) = 0;
+    virtual std::shared_future<std::shared_ptr<LiveBucket>>
+    getLiveMergeFuture(MergeKey const& key) = 0;
+    virtual std::shared_future<std::shared_ptr<HotArchiveBucket>>
+    getHotArchiveMergeFuture(MergeKey const& key) = 0;
 
     // Add a reference to a merge _in progress_ (not yet adopted as a file) to
     // the BucketManager's internal map of std::shared_futures doing merges.
@@ -249,8 +261,11 @@
     // be removed from the map when the merge completes and the output file is
     // adopted.
     virtual void
-    putMergeFuture(MergeKey const& key,
-                   std::shared_future<std::shared_ptr<Bucket>>) = 0;
+    putLiveMergeFuture(MergeKey const& key,
+                       std::shared_future<std::shared_ptr<LiveBucket>>) = 0;
+    virtual void putHotArchiveMergeFuture(
+        MergeKey const& key,
+        std::shared_future<std::shared_ptr<HotArchiveBucket>>) = 0;
 
 #ifdef BUILD_TESTS
     // Drop all references to merge futures in progress.
@@ -267,10 +282,15 @@
     // be given separate init (created) and live (updated) entry vectors. The
     // `header` value should be taken from the ledger at which this batch is
     // being added.
-    virtual void addBatch(Application& app, LedgerHeader header,
-                          std::vector<LedgerEntry> const& initEntries,
-                          std::vector<LedgerEntry> const& liveEntries,
-                          std::vector<LedgerKey> const& deadEntries) = 0;
+    virtual void addLiveBatch(Application& app, LedgerHeader header,
+                              std::vector<LedgerEntry> const& initEntries,
+                              std::vector<LedgerEntry> const& liveEntries,
+                              std::vector<LedgerKey> const& deadEntries) = 0;
+    virtual void
+    addHotArchiveBatch(Application& app, LedgerHeader header,
+                       std::vector<LedgerEntry> const& archivedEntries,
+                       std::vector<LedgerKey> const& restoredEntries,
+                       std::vector<LedgerKey> const& deletedEntries) = 0;
 
     // Update the given LedgerHeader's bucketListHash to reflect the current
     // state of the bucket list.
@@ -287,20 +307,25 @@
     // Scans BucketList for non-live entries to evict starting at the entry
     // pointed to by EvictionIterator. Scans until `maxEntriesToEvict` entries
     // have been evicted or maxEvictionScanSize bytes have been scanned.
-    virtual void scanForEvictionLegacy(AbstractLedgerTxn& ltx,
-                                       uint32_t ledgerSeq) = 0;
-
-    virtual void startBackgroundEvictionScan(uint32_t ledgerSeq) = 0;
-    virtual void
+    virtual void startBackgroundEvictionScan(uint32_t ledgerSeq,
+                                             uint32_t ledgerVers) = 0;
+
+    // Returns a pair of vectors representing entries evicted this ledger,
+    // where the first vector contains all deleted keys (TTL and temporary),
+    // and the second vector contains all archived keys (persistent and
+    // ContractCode). Note that when an entry is archived, its TTL key will be
+    // included in the deleted keys vector.
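+    //
+    // Illustrative use (variable names here are hypothetical):
+    //   auto [deletedKeys, archivedKeys] = bm.resolveBackgroundEvictionScan(
+    //       ltx, ledgerSeq, modifiedKeys, ledgerVers);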
+ virtual std::pair, std::vector> resolveBackgroundEvictionScan(AbstractLedgerTxn& ltx, uint32_t ledgerSeq, - LedgerKeySet const& modifiedKeys) = 0; + LedgerKeySet const& modifiedKeys, + uint32_t ledgerVers) = 0; virtual medida::Meter& getBloomMissMeter() const = 0; virtual medida::Meter& getBloomLookupMeter() const = 0; #ifdef BUILD_TESTS // Install a fake/assumed ledger version and bucket list hash to use in next - // call to addBatch and snapshotLedger. This interface exists only for + // call to addLiveBatch and snapshotLedger. This interface exists only for // testing in a specific type of history replay. virtual void setNextCloseVersionAndHashForTesting(uint32_t protocolVers, uint256 const& hash) = 0; @@ -349,7 +374,7 @@ class BucketManager : NonMovableOrCopyable // Merge the bucket list of the provided HAS into a single "super bucket" // consisting of only live entries, and return it. - virtual std::shared_ptr + virtual std::shared_ptr mergeBuckets(HistoryArchiveState const& has) = 0; // Visits all the active ledger entries or subset thereof. @@ -382,9 +407,12 @@ class BucketManager : NonMovableOrCopyable virtual Config const& getConfig() const = 0; - // Get bucketlist snapshot - virtual std::shared_ptr - getSearchableBucketListSnapshot() = 0; + // Get main thread's bucketlist snapshot + virtual std::shared_ptr + getSearchableLiveBucketListSnapshot() = 0; + virtual std::shared_ptr + getSearchableHotArchiveBucketListSnapshot() = 0; + virtual void reportBucketEntryCountMetrics() = 0; }; } diff --git a/src/bucket/BucketManagerImpl.cpp b/src/bucket/BucketManagerImpl.cpp index a953bef124..fcb0cab6ad 100644 --- a/src/bucket/BucketManagerImpl.cpp +++ b/src/bucket/BucketManagerImpl.cpp @@ -12,6 +12,7 @@ #include "bucket/BucketSnapshotManager.h" #include "crypto/BLAKE2.h" #include "crypto/Hex.h" +#include "crypto/SHA.h" #include "history/HistoryManager.h" #include "historywork/VerifyBucketWork.h" #include "ledger/LedgerManager.h" @@ -23,6 +24,7 @@ #include "util/GlobalChecks.h" #include "util/LogSlowExecution.h" #include "util/Logging.h" +#include "util/ProtocolVersion.h" #include "util/TmpDir.h" #include "util/types.h" #include "xdr/Stellar-ledger.h" @@ -30,6 +32,7 @@ #include #include #include +#include #include #include #include @@ -123,16 +126,15 @@ BucketManagerImpl::initialize() if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) { - mBucketList = std::make_unique(); - - if (mApp.getConfig().isUsingBucketListDB()) - { - mSnapshotManager = std::make_unique( - mApp, - std::make_unique(*mBucketList, - LedgerHeader()), - mApp.getConfig().QUERY_SNAPSHOT_LEDGERS); - } + mLiveBucketList = std::make_unique(); + mHotArchiveBucketList = std::make_unique(); + mSnapshotManager = std::make_unique( + mApp, + std::make_unique>(*mLiveBucketList, + LedgerHeader()), + std::make_unique>( + *mHotArchiveBucketList, LedgerHeader()), + mApp.getConfig().QUERY_SNAPSHOT_LEDGERS); } } @@ -166,14 +168,20 @@ EvictionCounters::EvictionCounters(Application& app) BucketManagerImpl::BucketManagerImpl(Application& app) : mApp(app) - , mBucketList(nullptr) + , mLiveBucketList(nullptr) + , mHotArchiveBucketList(nullptr) , mSnapshotManager(nullptr) , mTmpDirManager(nullptr) , mWorkDir(nullptr) , mLockedBucketDir(nullptr) - , mBucketObjectInsertBatch(app.getMetrics().NewMeter( + , mBucketLiveObjectInsertBatch(app.getMetrics().NewMeter( {"bucket", "batch", "objectsadded"}, "object")) - , mBucketAddBatch(app.getMetrics().NewTimer({"bucket", "batch", "addtime"})) + , mBucketArchiveObjectInsertBatch(app.getMetrics().NewMeter( + 
{"bucket", "batch-archive", "objectsadded"}, "object")) + , mBucketAddLiveBatch( + app.getMetrics().NewTimer({"bucket", "batch", "addtime"})) + , mBucketAddArchiveBatch( + app.getMetrics().NewTimer({"bucket", "batch-archive", "addtime"})) , mBucketSnapMerge(app.getMetrics().NewTimer({"bucket", "snap", "merge"})) , mSharedBucketsSize( app.getMetrics().NewCounter({"bucket", "memory", "shared"})) @@ -181,14 +189,12 @@ BucketManagerImpl::BucketManagerImpl(Application& app) {"bucketlistDB", "bloom", "misses"}, "bloom")) , mBucketListDBBloomLookups(app.getMetrics().NewMeter( {"bucketlistDB", "bloom", "lookups"}, "bloom")) - , mBucketListSizeCounter( + , mLiveBucketListSizeCounter( app.getMetrics().NewCounter({"bucketlist", "size", "bytes"})) + , mArchiveBucketListSizeCounter( + app.getMetrics().NewCounter({"bucketlist-archive", "size", "bytes"})) , mBucketListEvictionCounters(app) , mEvictionStatistics(std::make_shared()) - // Minimal DB is stored in the buckets dir, so delete it only when - // mode does not use minimal DB - , mDeleteEntireBucketDirInDtor( - app.getConfig().isInMemoryModeWithoutMinimalDB()) { for (uint32_t t = static_cast(LedgerEntryTypeAndDurability::ACCOUNT); @@ -273,14 +279,7 @@ BucketManagerImpl::getBucketDir() const BucketManagerImpl::~BucketManagerImpl() { ZoneScoped; - if (mDeleteEntireBucketDirInDtor) - { - deleteEntireBucketDir(); - } - else - { - deleteTmpDirAndUnlockBucketDir(); - } + deleteTmpDirAndUnlockBucketDir(); } void @@ -328,17 +327,23 @@ BucketManagerImpl::deleteTmpDirAndUnlockBucketDir() } } -BucketList& -BucketManagerImpl::getBucketList() +LiveBucketList& +BucketManagerImpl::getLiveBucketList() { releaseAssertOrThrow(mApp.getConfig().MODE_ENABLES_BUCKETLIST); - return *mBucketList; + return *mLiveBucketList; +} + +HotArchiveBucketList& +BucketManagerImpl::getHotArchiveBucketList() +{ + releaseAssertOrThrow(mApp.getConfig().MODE_ENABLES_BUCKETLIST); + return *mHotArchiveBucketList; } BucketSnapshotManager& BucketManagerImpl::getBucketSnapshotManager() const { - releaseAssertOrThrow(mApp.getConfig().isUsingBucketListDB()); releaseAssert(mSnapshotManager); return *mSnapshotManager; } @@ -474,7 +479,26 @@ BucketManagerImpl::renameBucketDirFile(std::filesystem::path const& src, } } -std::shared_ptr +std::shared_ptr +BucketManagerImpl::adoptFileAsLiveBucket( + std::string const& filename, uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index) +{ + return adoptFileAsBucket(filename, hash, mergeKey, + std::move(index)); +} + +std::shared_ptr +BucketManagerImpl::adoptFileAsHotArchiveBucket( + std::string const& filename, uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index) +{ + return adoptFileAsBucket(filename, hash, mergeKey, + std::move(index)); +} + +template +std::shared_ptr BucketManagerImpl::adoptFileAsBucket(std::string const& filename, uint256 const& hash, MergeKey* mergeKey, std::unique_ptr index) @@ -492,15 +516,16 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename, // weak record of the input/output mapping, so we can reconstruct the // future if anyone wants to restart the same merge before the bucket // expires. 
-        CLOG_TRACE(Bucket,
-                   "BucketManager::adoptFileAsBucket switching merge {} from "
-                   "live to finished for output={}",
-                   *mergeKey, hexAbbrev(hash));
+        CLOG_TRACE(
+            Bucket,
+            "BucketManager::adoptFileAsLiveBucket switching merge {} from "
+            "live to finished for output={}",
+            *mergeKey, hexAbbrev(hash));
         mLiveFutures.erase(*mergeKey);
     }

     // Check to see if we have an existing bucket (either in-memory or on-disk)
-    std::shared_ptr<Bucket> b = getBucketByHash(hash);
+    std::shared_ptr<BucketT> b = getBucketByHash<BucketT>(hash);
     if (b)
     {
         CLOG_DEBUG(
@@ -535,7 +560,7 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename,
         }
     }

-    b = std::make_shared<Bucket>(canonicalName, hash, std::move(index));
+    b = std::make_shared<BucketT>(canonicalName, hash, std::move(index));
     {
         mSharedBuckets.emplace(hash, b);
         mSharedBucketsSize.set_count(mSharedBuckets.size());
@@ -587,21 +612,41 @@ BucketManagerImpl::getBucketIfExists(uint256 const& hash)
     return nullptr;
 }

-std::shared_ptr<Bucket>
+std::shared_ptr<LiveBucket>
+BucketManagerImpl::getLiveBucketByHash(uint256 const& hash)
+{
+    return getBucketByHash<LiveBucket>(hash);
+}
+
+std::shared_ptr<HotArchiveBucket>
+BucketManagerImpl::getHotArchiveBucketByHash(uint256 const& hash)
+{
+    return getBucketByHash<HotArchiveBucket>(hash);
+}
+
+template <class BucketT>
+std::shared_ptr<BucketT>
 BucketManagerImpl::getBucketByHash(uint256 const& hash)
 {
     ZoneScoped;
     std::lock_guard<std::recursive_mutex> lock(mBucketMutex);
     if (isZero(hash))
     {
-        return std::make_shared<Bucket>();
+        return std::make_shared<BucketT>();
     }
     auto i = mSharedBuckets.find(hash);
     if (i != mSharedBuckets.end())
     {
         CLOG_TRACE(Bucket, "BucketManager::getBucketByHash({}) found bucket {}",
                    binToHex(hash), i->second->getFilename());
-        return i->second;
+
+        // Because BucketManager has an impl class, no public templated
+        // functions can be declared. This means we have to manually enforce
+        // types via `getLiveBucketByHash` and `getHotArchiveBucketByHash`,
+        // leading to this ugly cast.
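+        // A minimal sketch of the pattern (illustrative only): the shared
+        // map stores base-class pointers, the typed public accessors pin
+        // BucketT, and the cast re-establishes the static type:
+        //
+        //     std::shared_ptr<Bucket> any = mSharedBuckets.at(hash);
+        //     auto live = std::dynamic_pointer_cast<LiveBucket>(any);
+        //     releaseAssertOrThrow(live); // mixed-up types are a logic error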
+        auto ret = std::dynamic_pointer_cast<BucketT>(i->second);
+        releaseAssertOrThrow(ret);
+        return ret;
     }
     std::string canonicalName = bucketFilename(hash);
     if (fs::exists(canonicalName))
     {
@@ -612,15 +657,28 @@ BucketManagerImpl::getBucketByHash(uint256 const& hash)
                    binToHex(hash));

         auto p =
-            std::make_shared<Bucket>(canonicalName, hash, /*index=*/nullptr);
+            std::make_shared<BucketT>(canonicalName, hash, /*index=*/nullptr);
         mSharedBuckets.emplace(hash, p);
         mSharedBucketsSize.set_count(mSharedBuckets.size());
         return p;
     }
-    return std::shared_ptr<Bucket>();
+    return std::shared_ptr<BucketT>();
+}
+
+std::shared_future<std::shared_ptr<LiveBucket>>
+BucketManagerImpl::getLiveMergeFuture(MergeKey const& key)
+{
+    return getMergeFuture<LiveBucket>(key);
 }

-std::shared_future<std::shared_ptr<Bucket>>
+std::shared_future<std::shared_ptr<HotArchiveBucket>>
+BucketManagerImpl::getHotArchiveMergeFuture(MergeKey const& key)
+{
+    return getMergeFuture<HotArchiveBucket>(key);
+}
+
+template <class BucketT>
+std::shared_future<std::shared_ptr<BucketT>>
 BucketManagerImpl::getMergeFuture(MergeKey const& key)
 {
     ZoneScoped;
@@ -634,14 +692,14 @@ BucketManagerImpl::getMergeFuture(MergeKey const& key)
         Hash bucketHash;
         if (mFinishedMerges.findMergeFor(key, bucketHash))
         {
-            auto bucket = getBucketByHash(bucketHash);
+            auto bucket = getBucketByHash<BucketT>(bucketHash);
             if (bucket)
             {
                 CLOG_TRACE(Bucket,
                            "BucketManager::getMergeFuture returning new future "
                            "for finished merge {} with output={}",
                            key, hexAbbrev(bucketHash));
-                std::promise<std::shared_ptr<Bucket>> promise;
+                std::promise<std::shared_ptr<BucketT>> promise;
                 auto future = promise.get_future().share();
                 promise.set_value(bucket);
                 mc.mFinishedMergeReattachments++;
@@ -653,7 +711,7 @@
             Bucket,
             "BucketManager::getMergeFuture returning empty future for merge {}",
             key);
-        return std::shared_future<std::shared_ptr<Bucket>>();
+        return std::shared_future<std::shared_ptr<BucketT>>();
     }
     CLOG_TRACE(
         Bucket,
@@ -661,12 +719,32 @@
         key);
     mc.mRunningMergeReattachments++;
     incrMergeCounters(mc);
-    return i->second;
+
+    // Because BucketManager has an impl class, no public templated functions
+    // can be declared. This means we have to manually enforce types via
+    // `getLiveMergeFuture` and `getHotArchiveMergeFuture`, leading to this
+    // ugly variadic get that throws if the type is not correct.
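+    // A minimal sketch of the variant's behavior (illustrative only):
+    //
+    //     using LiveF = std::shared_future<std::shared_ptr<LiveBucket>>;
+    //     using HotF = std::shared_future<std::shared_ptr<HotArchiveBucket>>;
+    //     std::variant<LiveF, HotF> v = LiveF{};
+    //     std::get<LiveF>(v); // ok
+    //     std::get<HotF>(v);  // throws std::bad_variant_access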
+ return std::get>>(i->second); } +void +BucketManagerImpl::putLiveMergeFuture( + MergeKey const& key, std::shared_future> wp) +{ + putMergeFuture(key, wp); +} + +void +BucketManagerImpl::putHotArchiveMergeFuture( + MergeKey const& key, + std::shared_future> wp) +{ + putMergeFuture(key, wp); +} + +template void BucketManagerImpl::putMergeFuture( - MergeKey const& key, std::shared_future> wp) + MergeKey const& key, std::shared_future> wp) { ZoneScoped; releaseAssertOrThrow(mApp.getConfig().MODE_ENABLES_BUCKETLIST); @@ -697,31 +775,37 @@ BucketManagerImpl::getBucketListReferencedBuckets() const return referenced; } - // retain current bucket list - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) - { - auto const& level = mBucketList->getLevel(i); - auto rit = referenced.emplace(level.getCurr()->getHash()); - if (rit.second) + auto processBucketList = [&](auto const& bl, uint32_t levels) { + // retain current bucket list + for (uint32_t i = 0; i < levels; ++i) { - CLOG_TRACE(Bucket, "{} referenced by bucket list", - binToHex(*rit.first)); - } - rit = referenced.emplace(level.getSnap()->getHash()); - if (rit.second) - { - CLOG_TRACE(Bucket, "{} referenced by bucket list", - binToHex(*rit.first)); - } - for (auto const& h : level.getNext().getHashes()) - { - rit = referenced.emplace(hexToBin256(h)); + auto const& level = bl->getLevel(i); + auto rit = referenced.emplace(level.getCurr()->getHash()); if (rit.second) { - CLOG_TRACE(Bucket, "{} referenced by bucket list", h); + CLOG_TRACE(Bucket, "{} referenced by bucket list", + binToHex(*rit.first)); + } + rit = referenced.emplace(level.getSnap()->getHash()); + if (rit.second) + { + CLOG_TRACE(Bucket, "{} referenced by bucket list", + binToHex(*rit.first)); + } + for (auto const& h : level.getNext().getHashes()) + { + rit = referenced.emplace(hexToBin256(h)); + if (rit.second) + { + CLOG_TRACE(Bucket, "{} referenced by bucket list", h); + } } } - } + }; + + processBucketList(mLiveBucketList, LiveBucketList::kNumLevels); + processBucketList(mHotArchiveBucketList, + BucketListBase::kNumLevels); return referenced; } @@ -876,7 +960,7 @@ BucketManagerImpl::forgetUnreferencedBuckets() // There should be no futures alive with this output: we // switched to storing only weak input/output mappings // when any merge producing the bucket completed (in - // adoptFileAsBucket), and we believe there's only one + // adoptFileAsLiveBucket), and we believe there's only one // reference to the bucket anyways -- our own in // mSharedBuckets. But there might be a race we missed, // so double check & mop up here. 
Worst case we prevent @@ -901,10 +985,10 @@ BucketManagerImpl::forgetUnreferencedBuckets() } void -BucketManagerImpl::addBatch(Application& app, LedgerHeader header, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) +BucketManagerImpl::addLiveBatch(Application& app, LedgerHeader header, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) { ZoneScoped; releaseAssertOrThrow(app.getConfig().MODE_ENABLES_BUCKETLIST); @@ -914,17 +998,44 @@ BucketManagerImpl::addBatch(Application& app, LedgerHeader header, header.ledgerVersion = mFakeTestProtocolVersion; } #endif - auto timer = mBucketAddBatch.TimeScope(); - mBucketObjectInsertBatch.Mark(initEntries.size() + liveEntries.size() + - deadEntries.size()); - mBucketList->addBatch(app, header.ledgerSeq, header.ledgerVersion, - initEntries, liveEntries, deadEntries); - mBucketListSizeCounter.set_count(mBucketList->getSize()); - - if (app.getConfig().isUsingBucketListDB()) + auto timer = mBucketAddLiveBatch.TimeScope(); + mBucketLiveObjectInsertBatch.Mark(initEntries.size() + liveEntries.size() + + deadEntries.size()); + mLiveBucketList->addBatch(app, header.ledgerSeq, header.ledgerVersion, + initEntries, liveEntries, deadEntries); + mLiveBucketListSizeCounter.set_count(mLiveBucketList->getSize()); + reportBucketEntryCountMetrics(); +} + +void +BucketManagerImpl::addHotArchiveBatch( + Application& app, LedgerHeader header, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) +{ + ZoneScoped; + releaseAssertOrThrow(app.getConfig().MODE_ENABLES_BUCKETLIST); + releaseAssertOrThrow(protocolVersionStartsFrom( + header.ledgerVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); +#ifdef BUILD_TESTS + if (mUseFakeTestValuesForNextClose) { - reportBucketEntryCountMetrics(); + header.ledgerVersion = mFakeTestProtocolVersion; } +#endif + auto timer = mBucketAddArchiveBatch.TimeScope(); + mBucketArchiveObjectInsertBatch.Mark(archivedEntries.size() + + restoredEntries.size() + + deletedEntries.size()); + + // Hot archive should never modify an existing entry, so there are never + // live entries + mHotArchiveBucketList->addBatch(app, header.ledgerSeq, header.ledgerVersion, + archivedEntries, restoredEntries, + deletedEntries); + mArchiveBucketListSizeCounter.set_count(mHotArchiveBucketList->getSize()); } #ifdef BUILD_TESTS @@ -964,7 +1075,19 @@ BucketManagerImpl::snapshotLedger(LedgerHeader& currentHeader) Hash hash; if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) { - hash = mBucketList->getHash(); + if (protocolVersionStartsFrom( + currentHeader.ledgerVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + // TODO: Hash Archive Bucket + // Dependency: HAS supports Hot Archive BucketList + + hash = mLiveBucketList->getHash(); + } + else + { + hash = mLiveBucketList->getHash(); + } } currentHeader.bucketListHash = hash; @@ -992,25 +1115,15 @@ BucketManagerImpl::maybeSetIndex(std::shared_ptr b, } void -BucketManagerImpl::scanForEvictionLegacy(AbstractLedgerTxn& ltx, - uint32_t ledgerSeq) -{ - ZoneScoped; - releaseAssert(protocolVersionStartsFrom(ltx.getHeader().ledgerVersion, - SOROBAN_PROTOCOL_VERSION)); - mBucketList->scanForEvictionLegacy( - mApp, ltx, ledgerSeq, mBucketListEvictionCounters, mEvictionStatistics); -} - -void -BucketManagerImpl::startBackgroundEvictionScan(uint32_t ledgerSeq) +BucketManagerImpl::startBackgroundEvictionScan(uint32_t ledgerSeq, + uint32_t 
ledgerVers) { - releaseAssert(mApp.getConfig().isUsingBucketListDB()); releaseAssert(mSnapshotManager); releaseAssert(!mEvictionFuture.valid()); releaseAssert(mEvictionStatistics); - auto searchableBL = mSnapshotManager->copySearchableBucketListSnapshot(); + auto searchableBL = + mSnapshotManager->copySearchableLiveBucketListSnapshot(); auto const& cfg = mApp.getLedgerManager().getSorobanNetworkConfig(); auto const& sas = cfg.stateArchivalSettings(); @@ -1019,21 +1132,22 @@ BucketManagerImpl::startBackgroundEvictionScan(uint32_t ledgerSeq) // copy this lambda, otherwise we could use unique_ptr. auto task = std::make_shared( [bl = std::move(searchableBL), iter = cfg.evictionIterator(), ledgerSeq, - sas, &counters = mBucketListEvictionCounters, + ledgerVers, sas, &counters = mBucketListEvictionCounters, stats = mEvictionStatistics] { - return bl->scanForEviction(ledgerSeq, counters, iter, stats, sas); + return bl->scanForEviction(ledgerSeq, counters, iter, stats, sas, + ledgerVers); }); mEvictionFuture = task->get_future(); mApp.postOnEvictionBackgroundThread( bind(&task_t::operator(), task), - "SearchableBucketListSnapshot: eviction scan"); + "SearchableLiveBucketListSnapshot: eviction scan"); } -void +std::pair, std::vector> BucketManagerImpl::resolveBackgroundEvictionScan( AbstractLedgerTxn& ltx, uint32_t ledgerSeq, - LedgerKeySet const& modifiedKeys) + LedgerKeySet const& modifiedKeys, uint32_t ledgerVers) { ZoneScoped; releaseAssert(threadIsMain()); @@ -1041,7 +1155,7 @@ BucketManagerImpl::resolveBackgroundEvictionScan( if (!mEvictionFuture.valid()) { - startBackgroundEvictionScan(ledgerSeq); + startBackgroundEvictionScan(ledgerSeq, ledgerVers); } auto evictionCandidates = mEvictionFuture.get(); @@ -1054,44 +1168,60 @@ BucketManagerImpl::resolveBackgroundEvictionScan( if (!evictionCandidates.isValid(ledgerSeq, networkConfig.stateArchivalSettings())) { - startBackgroundEvictionScan(ledgerSeq); + startBackgroundEvictionScan(ledgerSeq, ledgerVers); evictionCandidates = mEvictionFuture.get(); } - auto& eligibleKeys = evictionCandidates.eligibleKeys; + auto& eligibleEntries = evictionCandidates.eligibleEntries; - for (auto iter = eligibleKeys.begin(); iter != eligibleKeys.end();) + for (auto iter = eligibleEntries.begin(); iter != eligibleEntries.end();) { // If the TTL has not been modified this ledger, we can evict the entry - if (modifiedKeys.find(getTTLKey(iter->key)) == modifiedKeys.end()) + if (modifiedKeys.find(getTTLKey(iter->entry)) == modifiedKeys.end()) { ++iter; } else { - iter = eligibleKeys.erase(iter); + iter = eligibleEntries.erase(iter); } } auto remainingEntriesToEvict = networkConfig.stateArchivalSettings().maxEntriesToArchive; - auto entryToEvictIter = eligibleKeys.begin(); + auto entryToEvictIter = eligibleEntries.begin(); auto newEvictionIterator = evictionCandidates.endOfRegionIterator; + // Return vectors include both evicted entry and associated TTL + std::vector deletedKeys; + std::vector archivedEntries; + // Only actually evict up to maxEntriesToArchive of the eligible entries while (remainingEntriesToEvict > 0 && - entryToEvictIter != eligibleKeys.end()) + entryToEvictIter != eligibleEntries.end()) { - ltx.erase(entryToEvictIter->key); - ltx.erase(getTTLKey(entryToEvictIter->key)); + ltx.erase(LedgerEntryKey(entryToEvictIter->entry)); + ltx.erase(getTTLKey(entryToEvictIter->entry)); --remainingEntriesToEvict; + if (isTemporaryEntry(entryToEvictIter->entry.data)) + { + deletedKeys.emplace_back(LedgerEntryKey(entryToEvictIter->entry)); + } + else + { + 
archivedEntries.emplace_back(entryToEvictIter->entry); + } + + // Delete TTL for both types + deletedKeys.emplace_back(getTTLKey(entryToEvictIter->entry)); + auto age = ledgerSeq - entryToEvictIter->liveUntilLedger; mEvictionStatistics->recordEvictedEntry(age); mBucketListEvictionCounters.entriesEvicted.inc(); newEvictionIterator = entryToEvictIter->iter; - entryToEvictIter = eligibleKeys.erase(entryToEvictIter); + entryToEvictIter = eligibleEntries.erase(entryToEvictIter); } // If remainingEntriesToEvict == 0, that means we could not evict the entire @@ -1104,6 +1234,7 @@ BucketManagerImpl::resolveBackgroundEvictionScan( } networkConfig.updateEvictionIterator(ltx, newEvictionIterator); + return {deletedKeys, archivedEntries}; } medida::Meter& @@ -1166,49 +1297,51 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has, ZoneScoped; releaseAssertOrThrow(mApp.getConfig().MODE_ENABLES_BUCKETLIST); - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + // TODO: Assume archival bucket state + // Dependency: HAS supports Hot Archive BucketList + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { - auto curr = getBucketByHash(hexToBin256(has.currentBuckets.at(i).curr)); - auto snap = getBucketByHash(hexToBin256(has.currentBuckets.at(i).snap)); + auto curr = + getLiveBucketByHash(hexToBin256(has.currentBuckets.at(i).curr)); + auto snap = + getLiveBucketByHash(hexToBin256(has.currentBuckets.at(i).snap)); if (!(curr && snap)) { throw std::runtime_error("Missing bucket files while assuming " - "saved BucketList state"); + "saved live BucketList state"); } auto const& nextFuture = has.currentBuckets.at(i).next; - std::shared_ptr nextBucket = nullptr; + std::shared_ptr nextBucket = nullptr; if (nextFuture.hasOutputHash()) { nextBucket = - getBucketByHash(hexToBin256(nextFuture.getOutputHash())); + getLiveBucketByHash(hexToBin256(nextFuture.getOutputHash())); if (!nextBucket) { - throw std::runtime_error("Missing future bucket files while " - "assuming saved BucketList state"); + throw std::runtime_error( + "Missing future bucket files while " + "assuming saved live BucketList state"); } } - // Buckets on the BucketList should always be indexed when - // BucketListDB enabled - if (mApp.getConfig().isUsingBucketListDB()) + // Buckets on the BucketList should always be indexed + releaseAssert(curr->isEmpty() || curr->isIndexed()); + releaseAssert(snap->isEmpty() || snap->isIndexed()); + if (nextBucket) { - releaseAssert(curr->isEmpty() || curr->isIndexed()); - releaseAssert(snap->isEmpty() || snap->isIndexed()); - if (nextBucket) - { - releaseAssert(nextBucket->isEmpty() || nextBucket->isIndexed()); - } + releaseAssert(nextBucket->isEmpty() || nextBucket->isIndexed()); } - mBucketList->getLevel(i).setCurr(curr); - mBucketList->getLevel(i).setSnap(snap); - mBucketList->getLevel(i).setNext(nextFuture); + mLiveBucketList->getLevel(i).setCurr(curr); + mLiveBucketList->getLevel(i).setSnap(snap); + mLiveBucketList->getLevel(i).setNext(nextFuture); } if (restartMerges) { - mBucketList->restartMerges(mApp, maxProtocolVersion, has.currentLedger); + mLiveBucketList->restartMerges(mApp, maxProtocolVersion, + has.currentLedger); } cleanupStaleFiles(); } @@ -1229,14 +1362,14 @@ BucketManagerImpl::isShutdown() const // inserting live or init entries. Should be called in a loop over a BL, from // old to new. 
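 // A minimal sketch of that loop (illustrative names only), mirroring
 // loadCompleteLedgerState below -- deepest (oldest) level first, snap
 // before curr within a level:
 //
 //     for (uint32_t i = LiveBucketList::kNumLevels; i > 0; --i)
 //     {
 //         loadEntriesFromBucket(snapAt(i - 1), "snap", ledgerMap);
 //         loadEntriesFromBucket(currAt(i - 1), "curr", ledgerMap);
 //     }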
static void -loadEntriesFromBucket(std::shared_ptr b, std::string const& name, +loadEntriesFromBucket(std::shared_ptr b, std::string const& name, std::map& map) { ZoneScoped; using namespace std::chrono; medida::Timer timer; - BucketInputIterator in(b); + LiveBucketInputIterator in(b); timer.Time([&]() { while (in) { @@ -1282,7 +1415,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has) std::map ledgerMap; std::vector> hashes; - for (uint32_t i = BucketList::kNumLevels; i > 0; --i) + for (uint32_t i = LiveBucketList::kNumLevels; i > 0; --i) { HistoryStateBucket const& hsb = has.currentBuckets.at(i - 1); hashes.emplace_back(hexToBin256(hsb.snap), @@ -1296,7 +1429,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has) { continue; } - auto b = getBucketByHash(pair.first); + auto b = getLiveBucketByHash(pair.first); if (!b) { throw std::runtime_error(std::string("missing bucket: ") + @@ -1307,7 +1440,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has) return ledgerMap; } -std::shared_ptr +std::shared_ptr BucketManagerImpl::mergeBuckets(HistoryArchiveState const& has) { ZoneScoped; @@ -1317,8 +1450,8 @@ BucketManagerImpl::mergeBuckets(HistoryArchiveState const& has) MergeCounters mc; auto& ctx = mApp.getClock().getIOContext(); meta.ledgerVersion = mApp.getConfig().LEDGER_PROTOCOL_VERSION; - BucketOutputIterator out(getTmpDir(), /*keepDeadEntries=*/false, meta, mc, - ctx, /*doFsync=*/true); + LiveBucketOutputIterator out(getTmpDir(), /*keepTombstoneEntries=*/false, + meta, mc, ctx, /*doFsync=*/true); for (auto const& pair : ledgerMap) { BucketEntry be; @@ -1326,12 +1459,12 @@ BucketManagerImpl::mergeBuckets(HistoryArchiveState const& has) be.liveEntry() = pair.second; out.put(be); } - return out.getBucket(*this, /*shouldSynchronouslyIndex=*/false); + return out.getBucket(*this); } static bool visitLiveEntriesInBucket( - std::shared_ptr b, std::string const& name, + std::shared_ptr b, std::string const& name, std::optional minLedger, std::function const& filterEntry, std::function const& acceptEntry, @@ -1344,7 +1477,7 @@ visitLiveEntriesInBucket( bool stopIteration = false; timer.Time([&]() { - for (BucketInputIterator in(b); in; ++in) + for (LiveBucketInputIterator in(b); in; ++in) { BucketEntry const& e = *in; if (e.type() == LIVEENTRY || e.type() == INITENTRY) @@ -1395,7 +1528,7 @@ visitLiveEntriesInBucket( static bool visitAllEntriesInBucket( - std::shared_ptr b, std::string const& name, + std::shared_ptr b, std::string const& name, std::optional minLedger, std::function const& filterEntry, std::function const& acceptEntry) @@ -1407,7 +1540,7 @@ visitAllEntriesInBucket( bool stopIteration = false; timer.Time([&]() { - for (BucketInputIterator in(b); in; ++in) + for (LiveBucketInputIterator in(b); in; ++in) { BucketEntry const& e = *in; if (e.type() == LIVEENTRY || e.type() == INITENTRY) @@ -1459,7 +1592,7 @@ BucketManagerImpl::visitLedgerEntries( UnorderedSet deletedEntries; std::vector> hashes; - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { HistoryStateBucket const& hsb = has.currentBuckets.at(i); hashes.emplace_back(hexToBin256(hsb.curr), @@ -1475,7 +1608,7 @@ BucketManagerImpl::visitLedgerEntries( { continue; } - auto b = getBucketByHash(pair.first); + auto b = getLiveBucketByHash(pair.first); if (!b) { throw std::runtime_error(std::string("missing bucket: ") + @@ -1511,7 +1644,10 @@ BucketManagerImpl::scheduleVerifyReferencedBucketsWork() { 
             continue;
         }
-        auto b = getBucketByHash(h);
+
+        // TODO: Update verify for ArchiveBucket
+        // Dependency: HAS supports Hot Archive BucketList
+        auto b = getBucketByHash<LiveBucket>(h);
         if (!b)
         {
             throw std::runtime_error(fmt::format(
@@ -1530,29 +1666,38 @@ BucketManagerImpl::getConfig() const
     return mApp.getConfig();
 }

-std::shared_ptr<SearchableBucketListSnapshot>
-BucketManagerImpl::getSearchableBucketListSnapshot()
+std::shared_ptr<SearchableLiveBucketListSnapshot>
+BucketManagerImpl::getSearchableLiveBucketListSnapshot()
 {
-    releaseAssert(mApp.getConfig().isUsingBucketListDB());
     // Any other threads must maintain their own snapshot
     releaseAssert(threadIsMain());
     if (!mSearchableBucketListSnapshot)
     {
         mSearchableBucketListSnapshot =
-            mSnapshotManager->copySearchableBucketListSnapshot();
+            mSnapshotManager->copySearchableLiveBucketListSnapshot();
     }

     return mSearchableBucketListSnapshot;
 }

-void
-BucketManagerImpl::reportBucketEntryCountMetrics()
+std::shared_ptr<SearchableHotArchiveBucketListSnapshot>
+BucketManagerImpl::getSearchableHotArchiveBucketListSnapshot()
 {
-    if (!mApp.getConfig().isUsingBucketListDB())
+    // Any other threads must maintain their own snapshot
+    releaseAssert(threadIsMain());
+    if (!mSearchableHotArchiveBucketListSnapshot)
     {
-        return;
+        mSearchableHotArchiveBucketListSnapshot =
+            mSnapshotManager->copySearchableHotArchiveBucketListSnapshot();
     }
-    auto bucketEntryCounters = mBucketList->sumBucketEntryCounters();
+
+    return mSearchableHotArchiveBucketListSnapshot;
+}
+
+void
+BucketManagerImpl::reportBucketEntryCountMetrics()
+{
+    auto bucketEntryCounters = mLiveBucketList->sumBucketEntryCounters();
     for (auto [type, count] : bucketEntryCounters.entryTypeCounts)
     {
         auto countCounter = mBucketListEntryCountCounters.find(type);
diff --git a/src/bucket/BucketManagerImpl.h b/src/bucket/BucketManagerImpl.h
index 50b6479ede..a4cbf30c3d 100644
--- a/src/bucket/BucketManagerImpl.h
+++ b/src/bucket/BucketManagerImpl.h
@@ -29,7 +29,7 @@ class TmpDir;
 class AbstractLedgerTxn;
 class Application;
 class Bucket;
-class BucketList;
+class LiveBucketList;
 class BucketSnapshotManager;
 struct BucketEntryCounters;
 enum class LedgerEntryTypeAndDurability : uint32_t;
@@ -41,26 +41,32 @@ class BucketManagerImpl : public BucketManager
     static std::string const kLockFilename;

     Application& mApp;
-    std::unique_ptr<BucketList> mBucketList;
+    std::unique_ptr<LiveBucketList> mLiveBucketList;
+    std::unique_ptr<HotArchiveBucketList> mHotArchiveBucketList;
     std::unique_ptr<BucketSnapshotManager> mSnapshotManager;
     std::unique_ptr<TmpDirManager> mTmpDirManager;
     std::unique_ptr<TmpDir> mWorkDir;
     std::map<Hash, std::shared_ptr<Bucket>> mSharedBuckets;
-    std::shared_ptr<SearchableBucketListSnapshot>
+    std::shared_ptr<SearchableLiveBucketListSnapshot>
         mSearchableBucketListSnapshot{};
+    std::shared_ptr<SearchableHotArchiveBucketListSnapshot>
+        mSearchableHotArchiveBucketListSnapshot{};

     // Lock for managing raw Bucket files or the bucket directory. This lock is
     // only required for file access, but is not required for logical changes to
-    // the BucketList (i.e. addBatch).
+    // a BucketList (i.e. addLiveBatch).
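+    // A minimal sketch of the intended discipline (illustrative only):
+    //
+    //     {
+    //         std::lock_guard<std::recursive_mutex> lock(mBucketMutex);
+    //         // adopt / rename / delete bucket *files* here
+    //     }
+    //     // addLiveBatch / addHotArchiveBatch take no file lock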
mutable std::recursive_mutex mBucketMutex; std::unique_ptr mLockedBucketDir; - medida::Meter& mBucketObjectInsertBatch; - medida::Timer& mBucketAddBatch; + medida::Meter& mBucketLiveObjectInsertBatch; + medida::Meter& mBucketArchiveObjectInsertBatch; + medida::Timer& mBucketAddLiveBatch; + medida::Timer& mBucketAddArchiveBatch; medida::Timer& mBucketSnapMerge; medida::Counter& mSharedBucketsSize; medida::Meter& mBucketListDBBloomMisses; medida::Meter& mBucketListDBBloomLookups; - medida::Counter& mBucketListSizeCounter; + medida::Counter& mLiveBucketListSizeCounter; + medida::Counter& mArchiveBucketListSizeCounter; EvictionCounters mBucketListEvictionCounters; MergeCounters mMergeCounters; std::shared_ptr mEvictionStatistics{}; @@ -71,15 +77,19 @@ class BucketManagerImpl : public BucketManager std::future mEvictionFuture{}; - bool const mDeleteEntireBucketDirInDtor; - // Records bucket-merges that are currently _live_ in some FutureBucket, in // the sense of either running, or finished (with or without the // FutureBucket being resolved). Entries in this map will be cleared when // the FutureBucket is _cleared_ (typically when the owning BucketList level // is committed). - UnorderedMap>> - mLiveFutures; + + using LiveBucketFutureT = std::shared_future>; + using HotArchiveBucketFutureT = + std::shared_future>; + using BucketFutureT = + std::variant; + + UnorderedMap mLiveFutures; // Records bucket-merges that are _finished_, i.e. have been adopted as // (possibly redundant) bucket files. This is a "weak" (bi-multi-)map of @@ -98,6 +108,23 @@ class BucketManagerImpl : public BucketManager size_t numEntries) const; medida::Timer& getPointLoadTimer(LedgerEntryType t) const; + template + std::shared_ptr + adoptFileAsBucket(std::string const& filename, uint256 const& hash, + MergeKey* mergeKey, + std::unique_ptr index); + + template + std::shared_ptr getBucketByHash(uint256 const& hash); + + template + std::shared_future> + getMergeFuture(MergeKey const& key); + + template + void putMergeFuture(MergeKey const& key, + std::shared_future>); + #ifdef BUILD_TESTS bool mUseFakeTestValuesForNextClose{false}; uint32_t mFakeTestProtocolVersion; @@ -117,7 +144,8 @@ class BucketManagerImpl : public BucketManager std::string bucketIndexFilename(Hash const& hash) const override; std::string const& getTmpDir() override; std::string const& getBucketDir() const override; - BucketList& getBucketList() override; + LiveBucketList& getLiveBucketList() override; + HotArchiveBucketList& getHotArchiveBucketList() override; BucketSnapshotManager& getBucketSnapshotManager() const override; medida::Timer& getMergeTimer() override; MergeCounters readMergeCounters() override; @@ -125,43 +153,60 @@ class BucketManagerImpl : public BucketManager TmpDirManager& getTmpDirManager() override; bool renameBucketDirFile(std::filesystem::path const& src, std::filesystem::path const& dst) override; - std::shared_ptr - adoptFileAsBucket(std::string const& filename, uint256 const& hash, - MergeKey* mergeKey, - std::unique_ptr index) override; + std::shared_ptr + adoptFileAsLiveBucket(std::string const& filename, uint256 const& hash, + MergeKey* mergeKey, + std::unique_ptr index) override; + std::shared_ptr adoptFileAsHotArchiveBucket( + std::string const& filename, uint256 const& hash, MergeKey* mergeKey, + std::unique_ptr index) override; void noteEmptyMergeOutput(MergeKey const& mergeKey) override; std::shared_ptr getBucketIfExists(uint256 const& hash) override; - std::shared_ptr getBucketByHash(uint256 const& hash) override; - - 
std::shared_future> - getMergeFuture(MergeKey const& key) override; - void putMergeFuture(MergeKey const& key, - std::shared_future>) override; + std::shared_ptr + getLiveBucketByHash(uint256 const& hash) override; + std::shared_ptr + getHotArchiveBucketByHash(uint256 const& hash) override; + + std::shared_future> + getLiveMergeFuture(MergeKey const& key) override; + std::shared_future> + getHotArchiveMergeFuture(MergeKey const& key) override; + void putLiveMergeFuture( + MergeKey const& key, + std::shared_future>) override; + void putHotArchiveMergeFuture( + MergeKey const& key, + std::shared_future>) override; #ifdef BUILD_TESTS void clearMergeFuturesForTesting() override; #endif void forgetUnreferencedBuckets() override; - void addBatch(Application& app, LedgerHeader header, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) override; + void addLiveBatch(Application& app, LedgerHeader header, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) override; + void + addHotArchiveBatch(Application& app, LedgerHeader header, + std::vector const& archivedEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) override; void snapshotLedger(LedgerHeader& currentHeader) override; void maybeSetIndex(std::shared_ptr b, std::unique_ptr&& index) override; - void scanForEvictionLegacy(AbstractLedgerTxn& ltx, - uint32_t ledgerSeq) override; - void startBackgroundEvictionScan(uint32_t ledgerSeq) override; - void + void startBackgroundEvictionScan(uint32_t ledgerSeq, + uint32_t ledgerVers) override; + std::pair, std::vector> resolveBackgroundEvictionScan(AbstractLedgerTxn& ltx, uint32_t ledgerSeq, - LedgerKeySet const& modifiedKeys) override; + LedgerKeySet const& modifiedKeys, + uint32_t ledgerVers) override; medida::Meter& getBloomMissMeter() const override; medida::Meter& getBloomLookupMeter() const override; #ifdef BUILD_TESTS // Install a fake/assumed ledger version and bucket list hash to use in next - // call to addBatch and snapshotLedger. This interface exists only for + // call to addLiveBatch and snapshotLedger. This interface exists only for // testing in a specific type of history replay. void setNextCloseVersionAndHashForTesting(uint32_t protocolVers, uint256 const& hash) override; @@ -184,7 +229,7 @@ class BucketManagerImpl : public BucketManager std::map loadCompleteLedgerState(HistoryArchiveState const& has) override; - std::shared_ptr + std::shared_ptr mergeBuckets(HistoryArchiveState const& has) override; void visitLedgerEntries( @@ -197,8 +242,12 @@ class BucketManagerImpl : public BucketManager Config const& getConfig() const override; - std::shared_ptr - getSearchableBucketListSnapshot() override; + std::shared_ptr + getSearchableLiveBucketListSnapshot() override; + + std::shared_ptr + getSearchableHotArchiveBucketListSnapshot() override; + void reportBucketEntryCountMetrics() override; }; diff --git a/src/bucket/BucketOutputIterator.cpp b/src/bucket/BucketOutputIterator.cpp index 412cfad724..7d33fde456 100644 --- a/src/bucket/BucketOutputIterator.cpp +++ b/src/bucket/BucketOutputIterator.cpp @@ -6,7 +6,10 @@ #include "bucket/Bucket.h" #include "bucket/BucketIndex.h" #include "bucket/BucketManager.h" +#include "ledger/LedgerTypeUtils.h" #include "util/GlobalChecks.h" +#include "util/ProtocolVersion.h" +#include "xdr/Stellar-ledger.h" #include #include @@ -17,15 +20,17 @@ namespace stellar * Helper class that points to an output tempfile. 
Absorbs BucketEntries and * hashes them while writing to either destination. Produces a Bucket when done. */ -BucketOutputIterator::BucketOutputIterator(std::string const& tmpDir, - bool keepDeadEntries, - BucketMetadata const& meta, - MergeCounters& mc, - asio::io_context& ctx, bool doFsync) +template +BucketOutputIterator::BucketOutputIterator(std::string const& tmpDir, + bool keepTombstoneEntries, + BucketMetadata const& meta, + MergeCounters& mc, + asio::io_context& ctx, + bool doFsync) : mFilename(Bucket::randomBucketName(tmpDir)) , mOut(ctx, doFsync) , mBuf(nullptr) - , mKeepDeadEntries(keepDeadEntries) + , mKeepTombstoneEntries(keepTombstoneEntries) , mMeta(meta) , mMergeCounters(mc) { @@ -37,34 +42,94 @@ BucketOutputIterator::BucketOutputIterator(std::string const& tmpDir, if (protocolVersionStartsFrom( meta.ledgerVersion, - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) { - BucketEntry bme; - bme.type(METAENTRY); - bme.metaEntry() = mMeta; - put(bme); + + if constexpr (std::is_same_v) + { + BucketEntry bme; + bme.type(METAENTRY); + bme.metaEntry() = mMeta; + put(bme); + } + else + { + releaseAssertOrThrow(protocolVersionStartsFrom( + meta.ledgerVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)); + + HotArchiveBucketEntry bme; + bme.type(HOT_ARCHIVE_METAENTRY); + bme.metaEntry() = mMeta; + releaseAssertOrThrow(bme.metaEntry().ext.v() == 1); + put(bme); + } + mPutMeta = true; } } +template void -BucketOutputIterator::put(BucketEntry const& e) +BucketOutputIterator::put(BucketEntryT const& e) { ZoneScoped; - Bucket::checkProtocolLegality(e, mMeta.ledgerVersion); - if (e.type() == METAENTRY) + + if constexpr (std::is_same_v) { - if (mPutMeta) + LiveBucket::checkProtocolLegality(e, mMeta.ledgerVersion); + if (e.type() == METAENTRY) { - throw std::runtime_error( - "putting META entry in bucket after initial entry"); + if (mPutMeta) + { + throw std::runtime_error( + "putting META entry in bucket after initial entry"); + } } - } - if (!mKeepDeadEntries && e.type() == DEADENTRY) + if (!mKeepTombstoneEntries && BucketT::isTombstoneEntry(e)) + { + ++mMergeCounters.mOutputIteratorTombstoneElisions; + return; + } + } + else { - ++mMergeCounters.mOutputIteratorTombstoneElisions; - return; + if (e.type() == HOT_ARCHIVE_METAENTRY) + { + if (mPutMeta) + { + throw std::runtime_error( + "putting META entry in bucket after initial entry"); + } + } + else + { + if (e.type() == HOT_ARCHIVE_ARCHIVED) + { + if (!isSorobanEntry(e.archivedEntry().data)) + { + throw std::runtime_error( + "putting non-soroban entry in hot archive bucket"); + } + } + else + { + if (!isSorobanEntry(e.key())) + { + throw std::runtime_error( + "putting non-soroban entry in hot archive bucket"); + } + } + } + + // HOT_ARCHIVE_LIVE entries are dropped in the last bucket level + // (similar to DEADENTRY) on live BucketLists + if (!mKeepTombstoneEntries && BucketT::isTombstoneEntry(e)) + { + ++mMergeCounters.mOutputIteratorTombstoneElisions; + return; + } } // Check to see if there's an existing buffered entry. @@ -85,7 +150,7 @@ BucketOutputIterator::put(BucketEntry const& e) } else { - mBuf = std::make_unique(); + mBuf = std::make_unique(); } // In any case, replace *mBuf with e. 
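 // A minimal sketch of the compile-time dispatch put() relies on
 // (illustrative only); the static_assert on BucketT makes the else branch
 // the hot-archive case:
 //
 //     if constexpr (std::is_same_v<BucketT, LiveBucket>)
 //     { /* BucketEntry path: legality checks, DEADENTRY elision */ }
 //     else
 //     { /* HotArchiveBucketEntry path: Soroban-only enforcement */ }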
@@ -93,10 +158,10 @@ BucketOutputIterator::put(BucketEntry const& e) *mBuf = e; } -std::shared_ptr -BucketOutputIterator::getBucket(BucketManager& bucketManager, - bool shouldSynchronouslyIndex, - MergeKey* mergeKey) +template +std::shared_ptr +BucketOutputIterator::getBucket(BucketManager& bucketManager, + MergeKey* mergeKey) { ZoneScoped; if (mBuf) @@ -117,25 +182,33 @@ BucketOutputIterator::getBucket(BucketManager& bucketManager, { bucketManager.noteEmptyMergeOutput(*mergeKey); } - return std::make_shared(); + return std::make_shared(); } auto hash = mHasher.finish(); std::unique_ptr index{}; - // If this bucket needs to be indexed and is not already indexed - if (shouldSynchronouslyIndex) + // either it's a new bucket or we just reconstructed a bucket + // we already have, in any case ensure we have an index + if (auto b = bucketManager.getBucketIfExists(hash); !b || !b->isIndexed()) { - // either it's a new bucket or we just reconstructed a bucket - // we already have, in any case ensure we have an index - if (auto b = bucketManager.getBucketIfExists(hash); - !b || !b->isIndexed()) - { - index = BucketIndex::createIndex(bucketManager, mFilename, hash); - } + index = BucketIndex::createIndex(bucketManager, mFilename, + hash); } - return bucketManager.adoptFileAsBucket(mFilename.string(), hash, mergeKey, - std::move(index)); + if constexpr (std::is_same_v) + { + return bucketManager.adoptFileAsLiveBucket(mFilename.string(), hash, + mergeKey, std::move(index)); + } + else + { + + return bucketManager.adoptFileAsHotArchiveBucket( + mFilename.string(), hash, mergeKey, std::move(index)); + } } + +template class BucketOutputIterator; +template class BucketOutputIterator; } diff --git a/src/bucket/BucketOutputIterator.h b/src/bucket/BucketOutputIterator.h index 2b035f5f11..ecfe042c16 100644 --- a/src/bucket/BucketOutputIterator.h +++ b/src/bucket/BucketOutputIterator.h @@ -4,6 +4,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/Bucket.h" #include "bucket/BucketManager.h" #include "bucket/LedgerCmp.h" #include "util/XDRStream.h" @@ -20,17 +21,23 @@ class BucketManager; // Helper class that writes new elements to a file and returns a bucket // when finished. -class BucketOutputIterator +template class BucketOutputIterator { + static_assert(std::is_same_v || + std::is_same_v); + + using BucketEntryT = std::conditional_t, + BucketEntry, HotArchiveBucketEntry>; + protected: std::filesystem::path mFilename; XDROutputFileStream mOut; - BucketEntryIdCmp mCmp; - std::unique_ptr mBuf; + BucketEntryIdCmp mCmp; + std::unique_ptr mBuf; SHA256 mHasher; size_t mBytesPut{0}; size_t mObjectsPut{0}; - bool mKeepDeadEntries{true}; + bool mKeepTombstoneEntries{true}; BucketMetadata mMeta; bool mPutMeta{false}; MergeCounters& mMergeCounters; @@ -43,14 +50,16 @@ class BucketOutputIterator // version new enough that it should _write_ the metadata to the stream in // the form of a METAENTRY; but that's not a thing the caller gets to decide // (or forget to do), it's handled automatically. 
- BucketOutputIterator(std::string const& tmpDir, bool keepDeadEntries, + BucketOutputIterator(std::string const& tmpDir, bool keepTombstoneEntries, BucketMetadata const& meta, MergeCounters& mc, asio::io_context& ctx, bool doFsync); - void put(BucketEntry const& e); + void put(BucketEntryT const& e); - std::shared_ptr getBucket(BucketManager& bucketManager, - bool shouldSynchronouslyIndex, - MergeKey* mergeKey = nullptr); + std::shared_ptr getBucket(BucketManager& bucketManager, + MergeKey* mergeKey = nullptr); }; + +typedef BucketOutputIterator LiveBucketOutputIterator; +typedef BucketOutputIterator HotArchiveBucketOutputIterator; } diff --git a/src/bucket/BucketSnapshot.cpp b/src/bucket/BucketSnapshot.cpp index 921076af82..7cfceef275 100644 --- a/src/bucket/BucketSnapshot.cpp +++ b/src/bucket/BucketSnapshot.cpp @@ -7,67 +7,79 @@ #include "bucket/BucketListSnapshot.h" #include "ledger/LedgerTxn.h" #include "ledger/LedgerTypeUtils.h" +#include "util/ProtocolVersion.h" #include "util/XDRStream.h" +#include namespace stellar { -BucketSnapshot::BucketSnapshot(std::shared_ptr const b) +template +BucketSnapshotBase::BucketSnapshotBase( + std::shared_ptr const b) : mBucket(b) { releaseAssert(mBucket); } -BucketSnapshot::BucketSnapshot(BucketSnapshot const& b) +template +BucketSnapshotBase::BucketSnapshotBase( + BucketSnapshotBase const& b) : mBucket(b.mBucket), mStream(nullptr) { releaseAssert(mBucket); } +template bool -BucketSnapshot::isEmpty() const +BucketSnapshotBase::isEmpty() const { releaseAssert(mBucket); return mBucket->isEmpty(); } -std::pair, bool> -BucketSnapshot::getEntryAtOffset(LedgerKey const& k, std::streamoff pos, - size_t pageSize) const +template +std::pair::BucketEntryT>, + bool> +BucketSnapshotBase::getEntryAtOffset(LedgerKey const& k, + std::streamoff pos, + size_t pageSize) const { ZoneScoped; if (isEmpty()) { - return {std::nullopt, false}; + return {nullptr, false}; } auto& stream = getStream(); stream.seek(pos); - BucketEntry be; + BucketEntryT be; if (pageSize == 0) { if (stream.readOne(be)) { - return {std::make_optional(be), false}; + return {std::make_shared(be), false}; } } else if (stream.readPage(be, k, pageSize)) { - return {std::make_optional(be), false}; + return {std::make_shared(be), false}; } // Mark entry miss for metrics mBucket->getIndex().markBloomMiss(); - return {std::nullopt, true}; + return {nullptr, true}; } -std::pair, bool> -BucketSnapshot::getBucketEntry(LedgerKey const& k) const +template +std::pair::BucketEntryT>, + bool> +BucketSnapshotBase::getBucketEntry(LedgerKey const& k) const { ZoneScoped; if (isEmpty()) { - return {std::nullopt, false}; + return {nullptr, false}; } auto pos = mBucket->getIndex().lookup(k); @@ -77,7 +89,7 @@ BucketSnapshot::getBucketEntry(LedgerKey const& k) const mBucket->getIndex().getPageSize()); } - return {std::nullopt, false}; + return {nullptr, false}; } // When searching for an entry, BucketList calls this function on every bucket. @@ -85,10 +97,11 @@ BucketSnapshot::getBucketEntry(LedgerKey const& k) const // If we find the entry, we remove the found key from keys so that later buckets // do not load shadowed entries. If we don't find the entry, we do not remove it // from keys so that it will be searched for again at a lower level. 
+template void -BucketSnapshot::loadKeysWithLimits(std::set& keys, - std::vector& result, - LedgerKeyMeter* lkMeter) const +BucketSnapshotBase::loadKeys( + std::set& keys, + std::vector& result, LedgerKeyMeter* lkMeter) const { ZoneScoped; if (isEmpty()) @@ -101,32 +114,65 @@ BucketSnapshot::loadKeysWithLimits(std::set& keys, auto indexIter = index.begin(); while (currKeyIt != keys.end() && indexIter != index.end()) { + // lkMeter only supported for LiveBucketList + if (std::is_same_v && lkMeter) + { + auto keySize = xdr::xdr_size(*currKeyIt); + if (!lkMeter->canLoad(*currKeyIt, keySize)) + { + // If the transactions containing this key have a remaining + // quota less than the size of the key, we cannot load the + // entry, as xdr_size(key) <= xdr_size(entry). Here we consume + // keySize bytes from the quotas of transactions containing the + // key so that they will have zero remaining quota and + // additional entries belonging to only those same transactions + // will not be loaded even if they would fit in the remaining + // quota before this update. + lkMeter->updateReadQuotasForKey(*currKeyIt, keySize); + currKeyIt = keys.erase(currKeyIt); + continue; + } + } auto [offOp, newIndexIter] = index.scan(indexIter, *currKeyIt); indexIter = newIndexIter; if (offOp) { auto [entryOp, bloomMiss] = getEntryAtOffset( *currKeyIt, *offOp, mBucket->getIndex().getPageSize()); + if (entryOp) { - if (entryOp->type() != DEADENTRY) + // Don't return tombstone entries, as these do not exist wrt + // ledger state + if (!BucketT::isTombstoneEntry(*entryOp)) { - bool addEntry = true; - if (lkMeter) + // Only live bucket loads can be metered + if constexpr (std::is_same_v) { - // Here, we are metering after the entry has been - // loaded. This is because we need to know the size of - // the entry to meter it. Future work will add metering - // at the xdr level. - auto entrySize = xdr::xdr_size(entryOp->liveEntry()); - addEntry = lkMeter->canLoad(*currKeyIt, entrySize); - lkMeter->updateReadQuotasForKey(*currKeyIt, entrySize); + bool addEntry = true; + if (lkMeter) + { + // Here, we are metering after the entry has been + // loaded. This is because we need to know the size + // of the entry to meter it. Future work will add + // metering at the xdr level. 
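+                    // A minimal sketch of the arithmetic (illustrative
+                    // only): for each tx T that reads this key,
+                    //     quota(T) -= xdr_size(loaded value);
+                    // canLoad(key, sz) is true only if every such T
+                    // still has quota(T) >= sz.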
+ auto entrySize = + xdr::xdr_size(entryOp->liveEntry()); + addEntry = lkMeter->canLoad(*currKeyIt, entrySize); + lkMeter->updateReadQuotasForKey(*currKeyIt, + entrySize); + } + if (addEntry) + { + result.push_back(entryOp->liveEntry()); + } } - if (addEntry) + else { - result.push_back(entryOp->liveEntry()); + result.push_back(*entryOp); } } + currKeyIt = keys.erase(currKeyIt); continue; } @@ -137,7 +183,7 @@ BucketSnapshot::loadKeysWithLimits(std::set& keys, } std::vector const& -BucketSnapshot::getPoolIDsByAsset(Asset const& asset) const +LiveBucketSnapshot::getPoolIDsByAsset(Asset const& asset) const { static std::vector const emptyVec = {}; if (isEmpty()) @@ -149,13 +195,13 @@ BucketSnapshot::getPoolIDsByAsset(Asset const& asset) const } bool -BucketSnapshot::scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, - uint32_t ledgerSeq, - std::list& evictableKeys, - SearchableBucketListSnapshot& bl) const +LiveBucketSnapshot::scanForEviction( + EvictionIterator& iter, uint32_t& bytesToScan, uint32_t ledgerSeq, + std::list& evictableKeys, + SearchableLiveBucketListSnapshot& bl, uint32_t ledgerVers) const { ZoneScoped; - if (isEmpty() || protocolVersionIsBefore(Bucket::getBucketVersion(mBucket), + if (isEmpty() || protocolVersionIsBefore(mBucket->getBucketVersion(), SOROBAN_PROTOCOL_VERSION)) { // EOF, skip to next bucket @@ -177,7 +223,7 @@ BucketSnapshot::scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, for (auto& e : maybeEvictQueue) { // If TTL entry has not yet been deleted - if (auto ttl = loadResult.find(getTTLKey(e.key))->second; + if (auto ttl = loadResult.find(getTTLKey(e.entry))->second; ttl != nullptr) { // If TTL of entry is expired @@ -192,6 +238,20 @@ BucketSnapshot::scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, } }; + // Start evicting persistent entries in p23 + auto isEvictableType = [ledgerVers](auto const& le) { + if (protocolVersionIsBefore( + ledgerVers, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + return isTemporaryEntry(le); + } + else + { + return isSorobanEntry(le); + } + }; + // Open new stream for eviction scan to not interfere with BucketListDB load // streams XDRInputFileStream stream{}; @@ -213,14 +273,13 @@ BucketSnapshot::scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, if (be.type() == INITENTRY || be.type() == LIVEENTRY) { auto const& le = be.liveEntry(); - if (isTemporaryEntry(le.data)) + if (isEvictableType(le.data)) { keysToSearch.emplace(getTTLKey(le)); // Set lifetime to 0 as default, will be updated after TTL keys // loaded - maybeEvictQueue.emplace_back( - EvictionResultEntry(LedgerEntryKey(le), iter, 0)); + maybeEvictQueue.emplace_back(EvictionResultEntry(le, iter, 0)); } } @@ -240,8 +299,9 @@ BucketSnapshot::scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, return false; } +template XDRInputFileStream& -BucketSnapshot::getStream() const +BucketSnapshotBase::getStream() const { releaseAssertOrThrow(!isEmpty()); if (!mStream) @@ -252,9 +312,36 @@ BucketSnapshot::getStream() const return *mStream; } -std::shared_ptr -BucketSnapshot::getRawBucket() const +template +std::shared_ptr +BucketSnapshotBase::getRawBucket() const { return mBucket; } + +HotArchiveBucketSnapshot::HotArchiveBucketSnapshot( + std::shared_ptr const b) + : BucketSnapshotBase(b) +{ +} + +LiveBucketSnapshot::LiveBucketSnapshot( + std::shared_ptr const b) + : BucketSnapshotBase(b) +{ +} + +HotArchiveBucketSnapshot::HotArchiveBucketSnapshot( + HotArchiveBucketSnapshot const& b) + : 
BucketSnapshotBase(b) +{ +} + +LiveBucketSnapshot::LiveBucketSnapshot(LiveBucketSnapshot const& b) + : BucketSnapshotBase(b) +{ +} + +template class BucketSnapshotBase; +template class BucketSnapshotBase; } \ No newline at end of file diff --git a/src/bucket/BucketSnapshot.h b/src/bucket/BucketSnapshot.h index 18faa51c34..e0b195ce28 100644 --- a/src/bucket/BucketSnapshot.h +++ b/src/bucket/BucketSnapshot.h @@ -4,26 +4,38 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/Bucket.h" #include "bucket/LedgerCmp.h" #include "util/NonCopyable.h" +#include "xdr/Stellar-ledger-entries.h" #include #include -#include - namespace stellar { -class Bucket; class XDRInputFileStream; -class SearchableBucketListSnapshot; struct EvictionResultEntry; class LedgerKeyMeter; +class SearchableLiveBucketListSnapshot; // A lightweight wrapper around Bucket for thread safe BucketListDB lookups -class BucketSnapshot : public NonMovable +template class BucketSnapshotBase : public NonMovable { - std::shared_ptr const mBucket; + static_assert(std::is_same_v || + std::is_same_v); + + protected: + using BucketEntryT = std::conditional_t, + BucketEntry, HotArchiveBucketEntry>; + + // LiveBucket returns LedgerEntry vector on call to loadKeys, + // HotArchiveBucket returns HotArchiveBucketEntry + using BulkLoadReturnT = + std::conditional_t, LedgerEntry, + HotArchiveBucketEntry>; + + std::shared_ptr const mBucket; // Lazily-constructed and retained for read path. mutable std::unique_ptr mStream{}; @@ -37,32 +49,42 @@ class BucketSnapshot : public NonMovable // reads until key is found or the end of the page. Returns , where bloomMiss is true if a bloomMiss occurred during the // load. - std::pair, bool> + std::pair, bool> getEntryAtOffset(LedgerKey const& k, std::streamoff pos, size_t pageSize) const; - BucketSnapshot(std::shared_ptr const b); + BucketSnapshotBase(std::shared_ptr const b); // Only allow copy constructor, is threadsafe - BucketSnapshot(BucketSnapshot const& b); - BucketSnapshot& operator=(BucketSnapshot const&) = delete; + BucketSnapshotBase(BucketSnapshotBase const& b); + BucketSnapshotBase& operator=(BucketSnapshotBase const&) = delete; public: bool isEmpty() const; - std::shared_ptr getRawBucket() const; + std::shared_ptr getRawBucket() const; // Loads bucket entry for LedgerKey k. Returns , // where bloomMiss is true if a bloomMiss occurred during the load. - std::pair, bool> + std::pair, bool> getBucketEntry(LedgerKey const& k) const; // Loads LedgerEntry's for given keys. When a key is found, the // entry is added to result and the key is removed from keys. // If a pointer to a LedgerKeyMeter is provided, a key will only be loaded // if the meter has a transaction with sufficient read quota for the key. - void loadKeysWithLimits(std::set& keys, - std::vector& result, - LedgerKeyMeter* lkMeter) const; + // If Bucket is not of type LiveBucket, lkMeter is ignored. 
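+    // A minimal sketch of the two call shapes (illustrative only):
+    //
+    //     std::vector<LedgerEntry> live;
+    //     liveSnap.loadKeys(keys, live, &meter);   // metered
+    //     std::vector<HotArchiveBucketEntry> arch;
+    //     hotSnap.loadKeys(keys, arch, nullptr);   // lkMeter ignored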
+ void loadKeys(std::set& keys, + std::vector& result, + LedgerKeyMeter* lkMeter) const; +}; + +class LiveBucketSnapshot : public BucketSnapshotBase +{ + public: + LiveBucketSnapshot(std::shared_ptr const b); + + // Only allow copy constructors, is threadsafe + LiveBucketSnapshot(LiveBucketSnapshot const& b); // Return all PoolIDs that contain the given asset on either side of the // pool @@ -71,8 +93,16 @@ class BucketSnapshot : public NonMovable bool scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan, uint32_t ledgerSeq, std::list& evictableKeys, - SearchableBucketListSnapshot& bl) const; + SearchableLiveBucketListSnapshot& bl, + uint32_t ledgerVers) const; +}; + +class HotArchiveBucketSnapshot : public BucketSnapshotBase +{ + public: + HotArchiveBucketSnapshot(std::shared_ptr const b); - friend struct BucketLevelSnapshot; + // Only allow copy constructors, is threadsafe + HotArchiveBucketSnapshot(HotArchiveBucketSnapshot const& b); }; } \ No newline at end of file diff --git a/src/bucket/BucketSnapshotManager.cpp b/src/bucket/BucketSnapshotManager.cpp index 52f907307b..703da5c21f 100644 --- a/src/bucket/BucketSnapshotManager.cpp +++ b/src/bucket/BucketSnapshotManager.cpp @@ -3,8 +3,10 @@ // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 #include "bucket/BucketSnapshotManager.h" +#include "bucket/Bucket.h" #include "bucket/BucketListSnapshot.h" #include "main/Application.h" +#include "util/GlobalChecks.h" #include "util/XDRStream.h" // IWYU pragma: keep #include "medida/meter.h" @@ -15,12 +17,17 @@ namespace stellar { BucketSnapshotManager::BucketSnapshotManager( - Application& app, std::unique_ptr&& snapshot, - uint32_t numHistoricalSnapshots) + Application& app, + std::unique_ptr const>&& snapshot, + std::unique_ptr const>&& + hotArchiveSnapshot, + uint32_t numLiveHistoricalSnapshots) : mApp(app) - , mCurrentSnapshot(std::move(snapshot)) - , mHistoricalSnapshots() - , mNumHistoricalSnapshots(numHistoricalSnapshots) + , mCurrLiveSnapshot(std::move(snapshot)) + , mCurrHotArchiveSnapshot(std::move(hotArchiveSnapshot)) + , mLiveHistoricalSnapshots() + , mHotArchiveHistoricalSnapshots() + , mNumHistoricalSnapshots(numLiveHistoricalSnapshots) , mBulkLoadMeter(app.getMetrics().NewMeter( {"bucketlistDB", "query", "loads"}, "query")) , mBloomMisses(app.getMetrics().NewMeter( @@ -29,14 +36,25 @@ BucketSnapshotManager::BucketSnapshotManager( {"bucketlistDB", "bloom", "lookups"}, "bloom")) { releaseAssert(threadIsMain()); + releaseAssert(mCurrLiveSnapshot); + releaseAssert(mCurrHotArchiveSnapshot); } -std::shared_ptr -BucketSnapshotManager::copySearchableBucketListSnapshot() const +std::shared_ptr +BucketSnapshotManager::copySearchableLiveBucketListSnapshot() const { // Can't use std::make_shared due to private constructor - return std::shared_ptr( - new SearchableBucketListSnapshot(*this)); + return std::shared_ptr( + new SearchableLiveBucketListSnapshot(*this)); +} + +std::shared_ptr +BucketSnapshotManager::copySearchableHotArchiveBucketListSnapshot() const +{ + releaseAssert(mCurrHotArchiveSnapshot); + // Can't use std::make_shared due to private constructor + return std::shared_ptr( + new SearchableHotArchiveBucketListSnapshot(*this)); } medida::Timer& @@ -63,12 +81,43 @@ BucketSnapshotManager::recordBulkLoadMetrics(std::string const& label, return iter->second; } +template void BucketSnapshotManager::maybeUpdateSnapshot( - std::unique_ptr& snapshot, - std::map>& - historicalSnapshots) const + std::unique_ptr& snapshot, + std::map>& historicalSnapshots) + 
+    const
 {
+    static_assert(
+        std::is_same_v<SnapshotT, BucketListSnapshot<LiveBucket>> ||
+        std::is_same_v<SnapshotT, BucketListSnapshot<HotArchiveBucket>>);
+
+    auto const& managerSnapshot = [&]() -> auto const&
+    {
+        if constexpr (std::is_same_v<SnapshotT,
+                                     BucketListSnapshot<LiveBucket>>)
+        {
+            return mCurrLiveSnapshot;
+        }
+        else
+        {
+            return mCurrHotArchiveSnapshot;
+        }
+    }
+    ();
+
+    auto const& managerHistoricalSnapshots = [&]() -> auto const&
+    {
+        if constexpr (std::is_same_v<SnapshotT,
+                                     BucketListSnapshot<LiveBucket>>)
+        {
+            return mLiveHistoricalSnapshots;
+        }
+        else
+        {
+            return mHotArchiveHistoricalSnapshots;
+        }
+    }
+    ();
+
     // The canonical snapshot held by the BucketSnapshotManager is not being
     // modified. Rather, a thread is checking its copy against the canonical
     // snapshot, so use a shared lock.
@@ -76,64 +125,74 @@ BucketSnapshotManager::maybeUpdateSnapshot(
     // First update current snapshot
     if (!snapshot ||
-        snapshot->getLedgerSeq() != mCurrentSnapshot->getLedgerSeq())
+        snapshot->getLedgerSeq() != managerSnapshot->getLedgerSeq())
     {
         // Should only update with a newer snapshot
         releaseAssert(!snapshot || snapshot->getLedgerSeq() <
-                                       mCurrentSnapshot->getLedgerSeq());
-        snapshot = std::make_unique<BucketListSnapshot>(*mCurrentSnapshot);
+                                       managerSnapshot->getLedgerSeq());
+        snapshot = std::make_unique<SnapshotT>(*managerSnapshot);
     }

     // Then update historical snapshots (if any exist)
-    if (mHistoricalSnapshots.empty())
+    if (managerHistoricalSnapshots.empty())
     {
         return;
     }

     // If size of manager's history map is different, or if the oldest snapshot
     // ledger seq is different, we need to update.
-    if (mHistoricalSnapshots.size() != historicalSnapshots.size() ||
-        mHistoricalSnapshots.begin()->first !=
+    if (managerHistoricalSnapshots.size() != historicalSnapshots.size() ||
+        managerHistoricalSnapshots.begin()->first !=
             historicalSnapshots.begin()->first)
     {
         // Copy current snapshot map into historicalSnapshots
         historicalSnapshots.clear();
-        for (auto const& [ledgerSeq, snap] : mHistoricalSnapshots)
+        for (auto const& [ledgerSeq, snap] : managerHistoricalSnapshots)
         {
-            historicalSnapshots.emplace(
-                ledgerSeq, std::make_unique<BucketListSnapshot>(*snap));
+            historicalSnapshots.emplace(ledgerSeq,
+                                        std::make_unique<SnapshotT>(*snap));
         }
     }
 }

 void
 BucketSnapshotManager::updateCurrentSnapshot(
-    std::unique_ptr<BucketListSnapshot>&& newSnapshot)
+    std::unique_ptr<BucketListSnapshot<LiveBucket> const>&& liveSnapshot,
+    std::unique_ptr<BucketListSnapshot<HotArchiveBucket> const>&&
+        hotArchiveSnapshot)
 {
-    releaseAssert(newSnapshot);
     releaseAssert(threadIsMain());

-    // Updating the BucketSnapshotManager canonical snapshot, must lock
-    // exclusively for write access.
-    std::unique_lock<std::shared_mutex> lock(mSnapshotMutex);
-    releaseAssert(!mCurrentSnapshot || newSnapshot->getLedgerSeq() >=
-                                           mCurrentSnapshot->getLedgerSeq());
+    auto updateSnapshot = [numHistoricalSnapshots = mNumHistoricalSnapshots](
+                              auto& currentSnapshot, auto& historicalSnapshots,
+                              auto&& newSnapshot) {
+        releaseAssert(newSnapshot);
+        releaseAssert(!currentSnapshot || newSnapshot->getLedgerSeq() >=
+                                              currentSnapshot->getLedgerSeq());

-    // First update historical snapshots
-    if (mNumHistoricalSnapshots != 0)
-    {
-        // If historical snapshots are full, delete the oldest one
-        if (mHistoricalSnapshots.size() == mNumHistoricalSnapshots)
+        // First update historical snapshots
+        if (numHistoricalSnapshots != 0)
         {
-            mHistoricalSnapshots.erase(mHistoricalSnapshots.begin());
+            // If historical snapshots are full, delete the oldest one
+            if (historicalSnapshots.size() == numHistoricalSnapshots)
+            {
+                historicalSnapshots.erase(historicalSnapshots.begin());
+            }
+
+            historicalSnapshots.emplace(currentSnapshot->getLedgerSeq(),
+                                        std::move(currentSnapshot));
+            currentSnapshot = nullptr;
         }
-        mHistoricalSnapshots.emplace(mCurrentSnapshot->getLedgerSeq(),
-                                     std::move(mCurrentSnapshot));
-        mCurrentSnapshot = nullptr;
-    }
+        currentSnapshot.swap(newSnapshot);
+    };

-    mCurrentSnapshot.swap(newSnapshot);
+    // Updating the BucketSnapshotManager canonical snapshot, must lock
+    // exclusively for write access.
+    std::unique_lock<std::shared_mutex> lock(mSnapshotMutex);
+    updateSnapshot(mCurrLiveSnapshot, mLiveHistoricalSnapshots, liveSnapshot);
+    updateSnapshot(mCurrHotArchiveSnapshot, mHotArchiveHistoricalSnapshots,
+                   hotArchiveSnapshot);
 }

 void
@@ -170,4 +229,16 @@ BucketSnapshotManager::endPointLoadTimer(LedgerEntryType t,
         iter->second.Update(duration);
     }
 }
+
+template void
+BucketSnapshotManager::maybeUpdateSnapshot<BucketListSnapshot<LiveBucket>>(
+    std::unique_ptr<BucketListSnapshot<LiveBucket> const>& snapshot,
+    std::map<uint32_t, std::unique_ptr<BucketListSnapshot<LiveBucket> const>>&
+        historicalSnapshots) const;
+template void BucketSnapshotManager::maybeUpdateSnapshot<
+    BucketListSnapshot<HotArchiveBucket>>(
+    std::unique_ptr<BucketListSnapshot<HotArchiveBucket> const>& snapshot,
+    std::map<uint32_t,
+             std::unique_ptr<BucketListSnapshot<HotArchiveBucket> const>>&
+        historicalSnapshots) const;
 }
\ No newline at end of file
diff --git a/src/bucket/BucketSnapshotManager.h b/src/bucket/BucketSnapshotManager.h
index 71b33862b0..de44f6f165 100644
--- a/src/bucket/BucketSnapshotManager.h
+++ b/src/bucket/BucketSnapshotManager.h
@@ -4,6 +4,8 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0

+#include "bucket/Bucket.h"
+#include "bucket/BucketList.h"
 #include "bucket/BucketManagerImpl.h"
 #include "util/NonCopyable.h"
 #include "util/UnorderedMap.h"
@@ -23,8 +25,10 @@ namespace stellar
 {

 class Application;
-class BucketList;
-class BucketListSnapshot;
+class LiveBucketList;
+template <class BucketT> class BucketListSnapshot;
+class SearchableLiveBucketListSnapshot;
+class SearchableHotArchiveBucketListSnapshot;

 // This class serves as the boundary between non-threadsafe singleton classes
 // (BucketManager, BucketList, Metrics, etc) and threadsafe, parallel BucketList
@@ -37,16 +41,20 @@ class BucketSnapshotManager : NonMovableOrCopyable
     // Snapshot that is maintained and periodically updated by BucketManager on
     // the main thread. When background threads need to generate or refresh a
     // snapshot, they will copy this snapshot.
- std::unique_ptr mCurrentSnapshot{}; + std::unique_ptr const> mCurrLiveSnapshot{}; + std::unique_ptr const> + mCurrHotArchiveSnapshot{}; // ledgerSeq that the snapshot is based on -> snapshot - std::map> - mHistoricalSnapshots; + std::map const>> + mLiveHistoricalSnapshots; + std::map const>> + mHotArchiveHistoricalSnapshots; uint32_t const mNumHistoricalSnapshots; - // Lock must be held when accessing mCurrentSnapshot and - // mHistoricalSnapshots + // Lock must be held when accessing any snapshot mutable std::shared_mutex mSnapshotMutex; mutable UnorderedMap mPointTimers{}; @@ -59,26 +67,35 @@ class BucketSnapshotManager : NonMovableOrCopyable mutable std::optional mTimerStart; public: - // Called by main thread to update mCurrentSnapshot whenever the BucketList + // Called by main thread to update snapshots whenever the BucketList // is updated void updateCurrentSnapshot( - std::unique_ptr&& newSnapshot); + std::unique_ptr const>&& liveSnapshot, + std::unique_ptr const>&& + hotArchiveSnapshot); + // numHistoricalLedgers is the number of historical snapshots that the // snapshot manager will maintain. If numHistoricalLedgers is 5, snapshots // will be capable of querying state from ledger [lcl, lcl - 5]. - BucketSnapshotManager(Application& app, - std::unique_ptr&& snapshot, - uint32_t numHistoricalLedgers); - - std::shared_ptr - copySearchableBucketListSnapshot() const; - - // Checks if snapshot is out of date with mCurrentSnapshot and updates - // it accordingly - void maybeUpdateSnapshot( - std::unique_ptr& snapshot, - std::map>& - historicalSnapshots) const; + BucketSnapshotManager( + Application& app, + std::unique_ptr const>&& snapshot, + std::unique_ptr const>&& + hotArchiveSnapshot, + uint32_t numHistoricalLedgers); + + std::shared_ptr + copySearchableLiveBucketListSnapshot() const; + + std::shared_ptr + copySearchableHotArchiveBucketListSnapshot() const; + + // Checks if snapshot is out of date and updates it accordingly + template + void + maybeUpdateSnapshot(std::unique_ptr& snapshot, + std::map>& + historicalSnapshots) const; // All metric recording functions must only be called by the main thread void startPointLoadTimer() const; diff --git a/src/bucket/FutureBucket.cpp b/src/bucket/FutureBucket.cpp index 981708e196..bc2dec6d16 100644 --- a/src/bucket/FutureBucket.cpp +++ b/src/bucket/FutureBucket.cpp @@ -18,6 +18,7 @@ #include "util/GlobalChecks.h" #include "util/LogSlowExecution.h" #include "util/Logging.h" +#include "util/ProtocolVersion.h" #include "util/Thread.h" #include #include @@ -25,16 +26,17 @@ #include "medida/metrics_registry.h" #include +#include +#include namespace stellar { - -FutureBucket::FutureBucket(Application& app, - std::shared_ptr const& curr, - std::shared_ptr const& snap, - std::vector> const& shadows, - uint32_t maxProtocolVersion, bool countMergeEvents, - uint32_t level) +template +FutureBucket::FutureBucket( + Application& app, std::shared_ptr const& curr, + std::shared_ptr const& snap, + std::vector> const& shadows, + uint32_t maxProtocolVersion, bool countMergeEvents, uint32_t level) : mState(FB_LIVE_INPUTS) , mInputCurrBucket(curr) , mInputSnapBucket(snap) @@ -48,8 +50,8 @@ FutureBucket::FutureBucket(Application& app, releaseAssert(snap); mInputCurrBucketHash = binToHex(curr->getHash()); mInputSnapBucketHash = binToHex(snap->getHash()); - if (protocolVersionStartsFrom(Bucket::getBucketVersion(snap), - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + if (protocolVersionStartsFrom(snap->getBucketVersion(), + 
LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { if (!mInputShadowBuckets.empty()) { @@ -57,6 +59,20 @@ FutureBucket::FutureBucket(Application& app, "Invalid FutureBucket: ledger version doesn't support shadows"); } } + + if constexpr (!std::is_same_v) + { + if (!snap->isEmpty() && + protocolVersionIsBefore( + snap->getBucketVersion(), + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + throw std::runtime_error( + "Invalid ArchivalFutureBucket: ledger version doesn't support " + "Archival BucketList"); + } + } + for (auto const& b : mInputShadowBuckets) { mInputShadowBucketHashes.push_back(binToHex(b->getHash())); @@ -64,8 +80,9 @@ FutureBucket::FutureBucket(Application& app, startMerge(app, maxProtocolVersion, countMergeEvents, level); } +template void -FutureBucket::setLiveOutput(std::shared_ptr output) +FutureBucket::setLiveOutput(std::shared_ptr output) { ZoneScoped; mState = FB_LIVE_OUTPUT; @@ -74,14 +91,16 @@ FutureBucket::setLiveOutput(std::shared_ptr output) checkState(); } +template static void -checkHashEq(std::shared_ptr const& b, std::string const& h) +checkHashEq(std::shared_ptr const& b, std::string const& h) { releaseAssert(b->getHash() == hexToBin256(h)); } +template void -FutureBucket::checkHashesMatch() const +FutureBucket::checkHashesMatch() const { ZoneScoped; if (!mInputShadowBuckets.empty()) @@ -114,8 +133,9 @@ FutureBucket::checkHashesMatch() const * the different hash-only states are mutually exclusive with each other and * with live values. */ +template void -FutureBucket::checkState() const +FutureBucket::checkState() const { switch (mState) { @@ -174,8 +194,9 @@ FutureBucket::checkState() const } } +template void -FutureBucket::clearInputs() +FutureBucket::clearInputs() { mInputShadowBuckets.clear(); mInputSnapBucket.reset(); @@ -186,50 +207,57 @@ FutureBucket::clearInputs() mInputCurrBucketHash.clear(); } +template void -FutureBucket::clearOutput() +FutureBucket::clearOutput() { // NB: MSVC future<> implementation doesn't purge the task lambda (and // its captures) on invalidation (due to get()); must explicitly reset. - mOutputBucketFuture = std::shared_future>(); + mOutputBucketFuture = std::shared_future>(); mOutputBucketHash.clear(); mOutputBucket.reset(); } +template void -FutureBucket::clear() +FutureBucket::clear() { mState = FB_CLEAR; clearInputs(); clearOutput(); } +template bool -FutureBucket::isLive() const +FutureBucket::isLive() const { return (mState == FB_LIVE_INPUTS || mState == FB_LIVE_OUTPUT); } +template bool -FutureBucket::isMerging() const +FutureBucket::isMerging() const { return mState == FB_LIVE_INPUTS; } +template bool -FutureBucket::hasHashes() const +FutureBucket::hasHashes() const { return (mState == FB_HASH_INPUTS || mState == FB_HASH_OUTPUT); } +template bool -FutureBucket::isClear() const +FutureBucket::isClear() const { return mState == FB_CLEAR; } +template bool -FutureBucket::mergeComplete() const +FutureBucket::mergeComplete() const { ZoneScoped; releaseAssert(isLive()); @@ -241,8 +269,9 @@ FutureBucket::mergeComplete() const return futureIsReady(mOutputBucketFuture); } -std::shared_ptr -FutureBucket::resolve() +template +std::shared_ptr +FutureBucket::resolve() { ZoneScoped; checkState(); @@ -264,7 +293,7 @@ FutureBucket::resolve() // Explicitly reset shared_future to ensure destruction of shared state. // Some compilers store packaged_task lambdas in the shared state, // keeping its captures alive as long as the future is alive. 
- mOutputBucketFuture = std::shared_future>(); + mOutputBucketFuture = std::shared_future>(); } mState = FB_LIVE_OUTPUT; @@ -272,8 +301,9 @@ FutureBucket::resolve() return mOutputBucket; } +template bool -FutureBucket::hasOutputHash() const +FutureBucket::hasOutputHash() const { if (mState == FB_LIVE_OUTPUT || mState == FB_HASH_OUTPUT) { @@ -283,28 +313,31 @@ FutureBucket::hasOutputHash() const return false; } +template std::string const& -FutureBucket::getOutputHash() const +FutureBucket::getOutputHash() const { releaseAssert(mState == FB_LIVE_OUTPUT || mState == FB_HASH_OUTPUT); releaseAssert(!mOutputBucketHash.empty()); return mOutputBucketHash; } +template static std::chrono::seconds getAvailableTimeForMerge(Application& app, uint32_t level) { auto closeTime = app.getConfig().getExpectedLedgerCloseTime(); if (level >= 1) { - return closeTime * BucketList::levelHalf(level - 1); + return closeTime * BucketListBase::levelHalf(level - 1); } return closeTime; } +template void -FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, - bool countMergeEvents, uint32_t level) +FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, + bool countMergeEvents, uint32_t level) { ZoneScoped; // NB: startMerge starts with FutureBucket in a half-valid state; the inputs @@ -313,9 +346,9 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, releaseAssert(mState == FB_LIVE_INPUTS); - std::shared_ptr curr = mInputCurrBucket; - std::shared_ptr snap = mInputSnapBucket; - std::vector> shadows = mInputShadowBuckets; + std::shared_ptr curr = mInputCurrBucket; + std::shared_ptr snap = mInputSnapBucket; + std::vector> shadows = mInputShadowBuckets; releaseAssert(curr); releaseAssert(snap); @@ -329,13 +362,31 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, auto& timer = app.getMetrics().NewTimer( {"bucket", "merge-time", "level-" + std::to_string(level)}); + std::vector shadowHashes; + shadowHashes.reserve(shadows.size()); + for (auto const& b : shadows) + { + shadowHashes.emplace_back(b->getHash()); + } + // It's possible we're running a merge that's already running, for example // due to having been serialized to the publish queue and then immediately // deserialized. In this case we want to attach to the existing merge, which // will have left a std::shared_future behind in a shared cache in the // bucket manager. 
- MergeKey mk{BucketList::keepDeadEntries(level), curr, snap, shadows}; - auto f = bm.getMergeFuture(mk); + MergeKey mk{BucketListBase::keepTombstoneEntries(level), + curr->getHash(), snap->getHash(), shadowHashes}; + + std::shared_future> f; + if constexpr (std::is_same_v) + { + f = bm.getLiveMergeFuture(mk); + } + else + { + f = bm.getHotArchiveMergeFuture(mk); + } + if (f.valid()) { CLOG_TRACE(Bucket, @@ -347,9 +398,10 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, } asio::io_context& ctx = app.getWorkerIOContext(); bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC; - std::chrono::seconds availableTime = getAvailableTimeForMerge(app, level); + std::chrono::seconds availableTime = + getAvailableTimeForMerge(app, level); - using task_t = std::packaged_task()>; + using task_t = std::packaged_task()>; std::shared_ptr task = std::make_shared( [curr, snap, &bm, shadows, maxProtocolVersion, countMergeEvents, level, &timer, &ctx, doFsync, availableTime]() mutable { @@ -362,10 +414,10 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, ZoneNamedN(mergeZone, "Merge task", true); ZoneValueV(mergeZone, static_cast(level)); - auto res = - Bucket::merge(bm, maxProtocolVersion, curr, snap, shadows, - BucketList::keepDeadEntries(level), - countMergeEvents, ctx, doFsync); + auto res = Bucket::merge( + bm, maxProtocolVersion, curr, snap, shadows, + BucketListBase::keepTombstoneEntries(level), + countMergeEvents, ctx, doFsync); if (res) { @@ -395,15 +447,24 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion, }); mOutputBucketFuture = task->get_future().share(); - bm.putMergeFuture(mk, mOutputBucketFuture); + if constexpr (std::is_same_v) + { + bm.putLiveMergeFuture(mk, mOutputBucketFuture); + } + else + { + bm.putHotArchiveMergeFuture(mk, mOutputBucketFuture); + } + app.postOnBackgroundThread(bind(&task_t::operator(), task), "FutureBucket: merge"); checkState(); } +template void -FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion, - uint32_t level) +FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion, + uint32_t level) { ZoneScoped; checkState(); @@ -412,20 +473,48 @@ FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion, auto& bm = app.getBucketManager(); if (hasOutputHash()) { - auto b = bm.getBucketByHash(hexToBin256(getOutputHash())); + std::shared_ptr b; + if constexpr (std::is_same_v) + { + b = bm.getLiveBucketByHash(hexToBin256(getOutputHash())); + } + else + { + b = bm.getHotArchiveBucketByHash(hexToBin256(getOutputHash())); + } + setLiveOutput(b); } else { releaseAssert(mState == FB_HASH_INPUTS); - mInputCurrBucket = - bm.getBucketByHash(hexToBin256(mInputCurrBucketHash)); - mInputSnapBucket = - bm.getBucketByHash(hexToBin256(mInputSnapBucketHash)); + if constexpr (std::is_same_v) + { + mInputCurrBucket = + bm.getLiveBucketByHash(hexToBin256(mInputCurrBucketHash)); + mInputSnapBucket = + bm.getLiveBucketByHash(hexToBin256(mInputSnapBucketHash)); + } + else + { + mInputCurrBucket = + bm.getHotArchiveBucketByHash(hexToBin256(mInputCurrBucketHash)); + mInputSnapBucket = + bm.getHotArchiveBucketByHash(hexToBin256(mInputSnapBucketHash)); + } releaseAssert(mInputShadowBuckets.empty()); for (auto const& h : mInputShadowBucketHashes) { - auto b = bm.getBucketByHash(hexToBin256(h)); + std::shared_ptr b; + if constexpr (std::is_same_v) + { + b = bm.getLiveBucketByHash(hexToBin256(h)); + } + else + { + b = bm.getHotArchiveBucketByHash(hexToBin256(h)); + } + releaseAssert(b); 
CLOG_DEBUG(Bucket, "Reconstituting shadow {}", h); mInputShadowBuckets.push_back(b); @@ -436,8 +525,9 @@ FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion, } } +template std::vector -FutureBucket::getHashes() const +FutureBucket::getHashes() const { ZoneScoped; std::vector hashes; @@ -459,4 +549,7 @@ FutureBucket::getHashes() const } return hashes; } + +template class FutureBucket; +template class FutureBucket; } diff --git a/src/bucket/FutureBucket.h b/src/bucket/FutureBucket.h index 4866d90235..cda7e6b61c 100644 --- a/src/bucket/FutureBucket.h +++ b/src/bucket/FutureBucket.h @@ -4,6 +4,7 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/Bucket.h" #include "util/GlobalChecks.h" #include #include @@ -16,13 +17,16 @@ namespace stellar class Bucket; class Application; +class LiveBucket; +class HotArchiveBucket; /** * FutureBucket is a minor wrapper around - * std::shared_future>, used in merging multiple buckets - * together in the BucketList. The reason this is a separate class is that we - * need to support a level of persistence: serializing merges-in-progress in a - * symbolic fashion, including restarting the merges after we deserialize. + * std::shared_future>, used in merging multiple + * buckets together in the BucketList. The reason this is a separate class is + * that we need to support a level of persistence: serializing + * merges-in-progress in a symbolic fashion, including restarting the merges + * after we deserialize. * * This class is therefore used not _only_ in the BucketList but also in places * that serialize and deserialize snapshots of it in the form of @@ -30,8 +34,11 @@ class Application; * the bottom of closeLedger; and the HistoryManager, when storing and * retrieving HistoryArchiveStates. */ -class FutureBucket +template class FutureBucket { + static_assert(std::is_same_v || + std::is_same_v); + // There are two lifecycles of a FutureBucket: // // In one, it's created live, snapshotted at some point in the process @@ -56,11 +63,11 @@ class FutureBucket // FutureBucket is constructed, when it is reset, or when it is freshly // deserialized and not yet activated. When they are nonempty, they should // have values equal to the subsequent mFooHash values below. - std::shared_ptr mInputCurrBucket; - std::shared_ptr mInputSnapBucket; - std::vector> mInputShadowBuckets; - std::shared_ptr mOutputBucket; - std::shared_future> mOutputBucketFuture; + std::shared_ptr mInputCurrBucket; + std::shared_ptr mInputSnapBucket; + std::vector> mInputShadowBuckets; + std::shared_ptr mOutputBucket; + std::shared_future> mOutputBucketFuture; // These strings hold the serializable (or deserialized) bucket hashes of // the inputs and outputs of a merge; depending on the state of the @@ -79,12 +86,12 @@ class FutureBucket void clearInputs(); void clearOutput(); - void setLiveOutput(std::shared_ptr b); + void setLiveOutput(std::shared_ptr b); public: - FutureBucket(Application& app, std::shared_ptr const& curr, - std::shared_ptr const& snap, - std::vector> const& shadows, + FutureBucket(Application& app, std::shared_ptr const& curr, + std::shared_ptr const& snap, + std::vector> const& shadows, uint32_t maxProtocolVersion, bool countMergeEvents, uint32_t level); @@ -118,7 +125,7 @@ class FutureBucket bool mergeComplete() const; // Precondition: isLive(); waits-for and resolves to merged bucket. 
-    std::shared_ptr<Bucket> resolve();
+    std::shared_ptr<BucketT> resolve();

     // Precondition: !isLive(); transitions from FB_HASH_FOO to FB_LIVE_FOO
     void makeLive(Application& app, uint32_t maxProtocolVersion,
diff --git a/src/bucket/LedgerCmp.h b/src/bucket/LedgerCmp.h
index 6551448f97..cc550a4e96 100644
--- a/src/bucket/LedgerCmp.h
+++ b/src/bucket/LedgerCmp.h
@@ -13,6 +13,9 @@
 namespace stellar
 {

+class LiveBucket;
+class HotArchiveBucket;
+
 template <typename T>
 bool
 lexCompare(T&& lhs1, T&& rhs1)
@@ -126,10 +129,70 @@ struct LedgerEntryIdCmp
 * LedgerEntries (ignoring their hashes, as the LedgerEntryIdCmp ignores their
 * bodies).
 */
-struct BucketEntryIdCmp
+template <class BucketT> struct BucketEntryIdCmp
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
+    using BucketEntryT =
+        std::conditional_t<std::is_same_v<BucketT, LiveBucket>, BucketEntry,
+                           HotArchiveBucketEntry>;
+
     bool
-    operator()(BucketEntry const& a, BucketEntry const& b) const
+    compareHotArchive(HotArchiveBucketEntry const& a,
+                      HotArchiveBucketEntry const& b) const
+    {
+        HotArchiveBucketEntryType aty = a.type();
+        HotArchiveBucketEntryType bty = b.type();
+
+        // METAENTRY sorts below all other entries, comes first in buckets.
+        if (aty == HOT_ARCHIVE_METAENTRY || bty == HOT_ARCHIVE_METAENTRY)
+        {
+            return aty < bty;
+        }
+
+        if (aty == HOT_ARCHIVE_ARCHIVED)
+        {
+            if (bty == HOT_ARCHIVE_ARCHIVED)
+            {
+                return LedgerEntryIdCmp{}(a.archivedEntry().data,
+                                          b.archivedEntry().data);
+            }
+            else
+            {
+                if (bty != HOT_ARCHIVE_DELETED && bty != HOT_ARCHIVE_LIVE)
+                {
+                    throw std::runtime_error("Malformed bucket: expected "
+                                             "DELETED/LIVE key.");
+                }
+                return LedgerEntryIdCmp{}(a.archivedEntry().data, b.key());
+            }
+        }
+        else
+        {
+            if (aty != HOT_ARCHIVE_DELETED && aty != HOT_ARCHIVE_LIVE)
+            {
+                throw std::runtime_error(
+                    "Malformed bucket: expected DELETED/LIVE key.");
+            }
+
+            if (bty == HOT_ARCHIVE_ARCHIVED)
+            {
+                return LedgerEntryIdCmp{}(a.key(), b.archivedEntry().data);
+            }
+            else
+            {
+                if (bty != HOT_ARCHIVE_DELETED && bty != HOT_ARCHIVE_LIVE)
+                {
+                    throw std::runtime_error("Malformed bucket: expected "
+                                             "DELETED/LIVE key.");
+                }
+                return LedgerEntryIdCmp{}(a.key(), b.key());
+            }
+        }
+    }
+
+    bool
+    compareLive(BucketEntry const& a, BucketEntry const& b) const
     {
         BucketEntryType aty = a.type();
         BucketEntryType bty = b.type();
@@ -179,5 +242,18 @@ struct BucketEntryIdCmp
             }
         }
     }
+
+    bool
+    operator()(BucketEntryT const& a, BucketEntryT const& b) const
+    {
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            return compareLive(a, b);
+        }
+        else
+        {
+            return compareHotArchive(a, b);
+        }
+    }
 };
 }
diff --git a/src/bucket/MergeKey.cpp b/src/bucket/MergeKey.cpp
index 74fc5993fb..f3932195f0 100644
--- a/src/bucket/MergeKey.cpp
+++ b/src/bucket/MergeKey.cpp
@@ -10,25 +10,19 @@
 namespace stellar
 {

-MergeKey::MergeKey(bool keepDeadEntries,
-                   std::shared_ptr<Bucket> const& inputCurr,
-                   std::shared_ptr<Bucket> const& inputSnap,
-                   std::vector<std::shared_ptr<Bucket>> const& inputShadows)
-    : mKeepDeadEntries(keepDeadEntries)
-    , mInputCurrBucket(inputCurr->getHash())
-    , mInputSnapBucket(inputSnap->getHash())
+MergeKey::MergeKey(bool keepTombstoneEntries, Hash const& currHash,
+                   Hash const& snapHash, std::vector<Hash> const& shadowHashes)
+    : mKeepTombstoneEntries(keepTombstoneEntries)
+    , mInputCurrBucket(currHash)
+    , mInputSnapBucket(snapHash)
+    , mInputShadowBuckets(shadowHashes)
 {
-    mInputShadowBuckets.reserve(inputShadows.size());
-    for (auto const& s : inputShadows)
-    {
-        mInputShadowBuckets.emplace_back(s->getHash());
-    }
 }

 bool
 MergeKey::operator==(MergeKey const& other) const
 {
-    return mKeepDeadEntries == other.mKeepDeadEntries &&
+    return mKeepTombstoneEntries == other.mKeepTombstoneEntries &&
           mInputCurrBucket == other.mInputCurrBucket &&
           mInputSnapBucket == other.mInputSnapBucket &&
           mInputShadowBuckets == other.mInputShadowBuckets;
 }
@@ -49,7 +43,7 @@ operator<<(std::ostream& out, MergeKey const& b)
         first = false;
         out << hexAbbrev(s);
     }
-    out << fmt::format(FMT_STRING("], keep={}]"), b.mKeepDeadEntries);
+    out << fmt::format(FMT_STRING("], keep={}]"), b.mKeepTombstoneEntries);
     return out;
 }

@@ -68,7 +62,7 @@ size_t
 hash<stellar::MergeKey>::operator()(stellar::MergeKey const& key) const noexcept
 {
     std::ostringstream oss;
-    oss << key.mKeepDeadEntries << ','
+    oss << key.mKeepTombstoneEntries << ','
         << stellar::binToHex(key.mInputCurrBucket) << ','
         << stellar::binToHex(key.mInputSnapBucket);
     for (auto const& e : key.mInputShadowBuckets)
diff --git a/src/bucket/MergeKey.h b/src/bucket/MergeKey.h
index e9098f26ac..d33a73672b 100644
--- a/src/bucket/MergeKey.h
+++ b/src/bucket/MergeKey.h
@@ -17,11 +17,10 @@ namespace stellar
 // pre-resolved std::shared_future containing that output.
 struct MergeKey
 {
-    MergeKey(bool keepDeadEntries, std::shared_ptr<Bucket> const& inputCurr,
-             std::shared_ptr<Bucket> const& inputSnap,
-             std::vector<std::shared_ptr<Bucket>> const& inputShadows);
+    MergeKey(bool keepTombstoneEntries, Hash const& currHash,
+             Hash const& snapHash, std::vector<Hash> const& shadowHashes);

-    bool mKeepDeadEntries;
+    bool mKeepTombstoneEntries;
     Hash mInputCurrBucket;
     Hash mInputSnapBucket;
     std::vector<Hash> mInputShadowBuckets;
diff --git a/src/bucket/readme.md b/src/bucket/readme.md
index 29d4b81bd8..34439828e7 100644
--- a/src/bucket/readme.md
+++ b/src/bucket/readme.md
@@ -83,8 +83,6 @@ for smaller memory overhead. Because each `BucketIndex` must be in memory, there is a tradeoff between BucketList lookup speed and memory overhead. The following configuration flags control these options:

-- `DEPRECATED_SQL_LEDGER_STATE`
-  - When set to false, the `BucketList` is indexed and used for ledger entry lookup
 - `BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT`
   - Page size used for `RangeIndex`, where `pageSize == 2^BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT`.
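
The readme hunk above pins down one concrete relationship worth illustrating: the range-index page size is derived from the exponent flag. The sketch below is illustrative only; `rangeIndexPageSize` is a hypothetical helper, not a stellar-core function, and treating exponent 0 as "individual index only" is taken from the test configurations in BucketIndexTests.cpp below.

    #include <cstdint>
    #include <stdexcept>

    // Hypothetical helper (not part of stellar-core) showing the documented
    // relationship pageSize == 2^BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT.
    inline std::uint64_t
    rangeIndexPageSize(std::uint32_t pageSizeExponent)
    {
        if (pageSizeExponent == 0)
        {
            // Matches the "individual index only" test configuration below:
            // no range pages, every key is indexed individually.
            return 0;
        }
        if (pageSizeExponent >= 64)
        {
            throw std::invalid_argument("page size exponent too large");
        }
        return std::uint64_t(1) << pageSizeExponent; // e.g. exponent 14 -> 16384
    }
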
diff --git a/src/bucket/test/BucketIndexTests.cpp b/src/bucket/test/BucketIndexTests.cpp index 0a045e7762..e83e7922c5 100644 --- a/src/bucket/test/BucketIndexTests.cpp +++ b/src/bucket/test/BucketIndexTests.cpp @@ -16,6 +16,13 @@ #include "main/Config.h" #include "test/test.h" +#include "util/ProtocolVersion.h" +#include "util/UnorderedMap.h" +#include "util/UnorderedSet.h" +#include "util/XDRCereal.h" +#include "util/types.h" +#include + using namespace stellar; using namespace BucketTestUtils; @@ -69,7 +76,7 @@ class BucketIndexTest {CONFIG_SETTING}, 10); f(entries); closeLedger(*mApp); - } while (!BucketList::levelShouldSpill(ledger, mLevelsToBuild - 1)); + } while (!LiveBucketList::levelShouldSpill(ledger, mLevelsToBuild - 1)); } public: @@ -129,7 +136,7 @@ class BucketIndexTest auto searchableBL = getBM() .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + .copySearchableLiveBucketListSnapshot(); auto lk = LedgerEntryKey(canonicalEntry); auto currentLoadedEntry = searchableBL->load(lk); @@ -143,20 +150,18 @@ class BucketIndexTest for (uint32_t currLedger = ledger; currLedger > 0; --currLedger) { - auto [loadRes, snapshotExists] = - searchableBL->loadKeysFromLedger({lk}, currLedger); + auto loadRes = searchableBL->loadKeysFromLedger({lk}, currLedger); // If we query an older snapshot, should return if (currLedger < ledger - mApp->getConfig().QUERY_SNAPSHOT_LEDGERS) { - REQUIRE(!snapshotExists); - REQUIRE(loadRes.empty()); + REQUIRE(!loadRes); } else { - REQUIRE(snapshotExists); - REQUIRE(loadRes.size() == 1); - REQUIRE(loadRes[0].lastModifiedLedgerSeq == currLedger - 1); + REQUIRE(loadRes); + REQUIRE(loadRes->size() == 1); + REQUIRE(loadRes->at(0).lastModifiedLedgerSeq == currLedger - 1); } } } @@ -250,7 +255,7 @@ class BucketIndexTest { auto searchableBL = getBM() .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + .copySearchableLiveBucketListSnapshot(); // Test bulk load lookup auto loadResult = @@ -277,7 +282,7 @@ class BucketIndexTest { auto searchableBL = getBM() .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + .copySearchableLiveBucketListSnapshot(); for (size_t i = 0; i < n; ++i) { LedgerKeySet searchSubset; @@ -317,7 +322,7 @@ class BucketIndexTest { auto searchableBL = getBM() .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + .copySearchableLiveBucketListSnapshot(); // Load should return empty vector for keys not in bucket list auto keysNotInBL = @@ -494,7 +499,7 @@ class BucketIndexPoolShareTest : public BucketIndexTest { auto searchableBL = getBM() .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + .copySearchableLiveBucketListSnapshot(); auto loadResult = searchableBL->loadPoolShareTrustLinesByAccountAndAsset( mAccountToSearch.accountID, mAssetToSearch); @@ -508,7 +513,6 @@ testAllIndexTypes(std::function f) SECTION("individual index only") { Config cfg(getTestConfig()); - cfg.DEPRECATED_SQL_LEDGER_STATE = false; cfg.BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT = 0; f(cfg); } @@ -516,7 +520,6 @@ testAllIndexTypes(std::function f) SECTION("individual and range index") { Config cfg(getTestConfig()); - cfg.DEPRECATED_SQL_LEDGER_STATE = false; // First 3 levels individual, last 3 range index cfg.BUCKETLIST_DB_INDEX_CUTOFF = 1; @@ -526,7 +529,6 @@ testAllIndexTypes(std::function f) SECTION("range index only") { Config cfg(getTestConfig()); - cfg.DEPRECATED_SQL_LEDGER_STATE = false; cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0; f(cfg); } @@ -608,7 +610,6 @@ TEST_CASE("serialize bucket indexes", 
"[bucket][bucketindex]") // All levels use range config cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0; - cfg.DEPRECATED_SQL_LEDGER_STATE = false; cfg.BUCKETLIST_DB_PERSIST_INDEX = true; cfg.INVARIANT_CHECKS = {}; @@ -619,6 +620,7 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") auto test = BucketIndexTest(cfg, /*levels=*/3); test.buildGeneralTest(); + std::set liveBuckets; auto buckets = test.getBM().getBucketListReferencedBuckets(); for (auto const& bucketHash : buckets) { @@ -631,7 +633,13 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") auto indexFilename = test.getBM().bucketIndexFilename(bucketHash); REQUIRE(fs::exists(indexFilename)); - auto b = test.getBM().getBucketByHash(bucketHash); + auto b = test.getBM().getBucketIfExists(bucketHash); + if (std::dynamic_pointer_cast(b)) + { + liveBuckets.emplace(bucketHash); + } + + REQUIRE(b); REQUIRE(b->isIndexed()); auto onDiskIndex = @@ -657,8 +665,19 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") } // Check if in-memory index has correct params - auto b = test.getBM().getBucketByHash(bucketHash); - REQUIRE(!b->isEmpty()); + std::shared_ptr b = nullptr; + if (liveBuckets.find(bucketHash) != liveBuckets.end()) + { + b = test.getBM().getLiveBucketByHash(bucketHash); + + REQUIRE(b); + REQUIRE(!b->isEmpty()); + } + else + { + continue; + } + REQUIRE(b->isIndexed()); auto& inMemoryIndex = b->getIndexForTesting(); @@ -682,4 +701,201 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]") REQUIRE((inMemoryIndex == *onDiskIndex)); } } + +// The majority of BucketListDB functionality is shared by all bucketlist types. +// This test is a simple sanity check and tests the interface differences +// between the live bucketlist and the hot archive bucketlist. +TEST_CASE("hot archive bucket lookups", "[bucket][bucketindex][archive]") +{ + auto f = [&](Config& cfg) { + auto clock = VirtualClock(); + auto app = createTestApplication(clock, cfg); + + UnorderedMap expectedArchiveEntries; + UnorderedSet expectedDeletedEntries; + UnorderedSet expectedRestoredEntries; + UnorderedSet keysToSearch; + + auto ledger = 1; + + // Use snapshot across ledger to test update behavior + auto searchableBL = app->getBucketManager() + .getBucketSnapshotManager() + .copySearchableHotArchiveBucketListSnapshot(); + + auto checkLoad = [&](LedgerKey const& k, + std::shared_ptr entryPtr) { + // Restored entries should be null + if (expectedRestoredEntries.find(k) != + expectedRestoredEntries.end()) + { + REQUIRE(!entryPtr); + } + + // Deleted entries should be HotArchiveBucketEntry of type + // DELETED + else if (expectedDeletedEntries.find(k) != + expectedDeletedEntries.end()) + { + REQUIRE(entryPtr); + REQUIRE(entryPtr->type() == + HotArchiveBucketEntryType::HOT_ARCHIVE_DELETED); + REQUIRE(entryPtr->key() == k); + } + + // Archived entries should contain full LedgerEntry + else + { + auto expectedIter = expectedArchiveEntries.find(k); + REQUIRE(expectedIter != expectedArchiveEntries.end()); + REQUIRE(entryPtr); + REQUIRE(entryPtr->type() == + HotArchiveBucketEntryType::HOT_ARCHIVE_ARCHIVED); + REQUIRE(entryPtr->archivedEntry() == expectedIter->second); + } + }; + + auto checkResult = [&] { + LedgerKeySet bulkLoadKeys; + for (auto const& k : keysToSearch) + { + auto entryPtr = searchableBL->load(k); + checkLoad(k, entryPtr); + bulkLoadKeys.emplace(k); + } + + auto bulkLoadResult = searchableBL->loadKeys(bulkLoadKeys); + for (auto entry : bulkLoadResult) + { + if (entry.type() == HOT_ARCHIVE_DELETED) + { + auto k = entry.key(); + 
auto iter = expectedDeletedEntries.find(k); + REQUIRE(iter != expectedDeletedEntries.end()); + expectedDeletedEntries.erase(iter); + } + else + { + REQUIRE(entry.type() == HOT_ARCHIVE_ARCHIVED); + auto le = entry.archivedEntry(); + auto k = LedgerEntryKey(le); + auto iter = expectedArchiveEntries.find(k); + REQUIRE(iter != expectedArchiveEntries.end()); + REQUIRE(iter->second == le); + expectedArchiveEntries.erase(iter); + } + } + + REQUIRE(expectedDeletedEntries.empty()); + REQUIRE(expectedArchiveEntries.empty()); + }; + + auto archivedEntries = + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {CONTRACT_DATA, CONTRACT_CODE}, 10); + for (auto const& e : archivedEntries) + { + auto k = LedgerEntryKey(e); + expectedArchiveEntries.emplace(k, e); + keysToSearch.emplace(k); + } + + // Note: keys to search automatically populated by these functions + auto deletedEntries = + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_DATA, CONTRACT_CODE}, 10, keysToSearch); + for (auto const& k : deletedEntries) + { + expectedDeletedEntries.emplace(k); + } + + auto restoredEntries = + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_DATA, CONTRACT_CODE}, 10, keysToSearch); + for (auto const& k : restoredEntries) + { + expectedRestoredEntries.emplace(k); + } + + auto header = + app->getLedgerManager().getLastClosedLedgerHeader().header; + header.ledgerSeq += 1; + header.ledgerVersion = static_cast( + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION); + addHotArchiveBatchAndUpdateSnapshot(*app, header, archivedEntries, + restoredEntries, deletedEntries); + checkResult(); + + // Add a few batches so that entries are no longer in the top bucket + for (auto i = 0; i < 100; ++i) + { + header.ledgerSeq += 1; + addHotArchiveBatchAndUpdateSnapshot(*app, header, {}, {}, {}); + } + + // Shadow entries via liveEntry + auto liveShadow1 = LedgerEntryKey(archivedEntries[0]); + auto liveShadow2 = deletedEntries[1]; + + header.ledgerSeq += 1; + addHotArchiveBatchAndUpdateSnapshot(*app, header, {}, + {liveShadow1, liveShadow2}, {}); + + // Point load + for (auto const& k : {liveShadow1, liveShadow2}) + { + auto entryPtr = searchableBL->load(k); + REQUIRE(!entryPtr); + } + + // Bulk load + auto bulkLoadResult = + searchableBL->loadKeys({liveShadow1, liveShadow2}); + REQUIRE(bulkLoadResult.size() == 0); + + // Shadow via deletedEntry + auto deletedShadow = LedgerEntryKey(archivedEntries[1]); + + header.ledgerSeq += 1; + addHotArchiveBatchAndUpdateSnapshot(*app, header, {}, {}, + {deletedShadow}); + + // Point load + auto entryPtr = searchableBL->load(deletedShadow); + REQUIRE(entryPtr); + REQUIRE(entryPtr->type() == + HotArchiveBucketEntryType::HOT_ARCHIVE_DELETED); + REQUIRE(entryPtr->key() == deletedShadow); + + // Bulk load + auto bulkLoadResult2 = searchableBL->loadKeys({deletedShadow}); + REQUIRE(bulkLoadResult2.size() == 1); + REQUIRE(bulkLoadResult2[0].type() == HOT_ARCHIVE_DELETED); + REQUIRE(bulkLoadResult2[0].key() == deletedShadow); + + // Shadow via archivedEntry + auto archivedShadow = archivedEntries[3]; + archivedShadow.lastModifiedLedgerSeq = ledger; + + header.ledgerSeq += 1; + addHotArchiveBatchAndUpdateSnapshot(*app, header, {archivedShadow}, {}, + {}); + + // Point load + entryPtr = searchableBL->load(LedgerEntryKey(archivedShadow)); + REQUIRE(entryPtr); + REQUIRE(entryPtr->type() == + HotArchiveBucketEntryType::HOT_ARCHIVE_ARCHIVED); + REQUIRE(entryPtr->archivedEntry() == archivedShadow); + + // Bulk load + auto bulkLoadResult3 = + 
searchableBL->loadKeys({LedgerEntryKey(archivedShadow)}); + REQUIRE(bulkLoadResult3.size() == 1); + REQUIRE(bulkLoadResult3[0].type() == HOT_ARCHIVE_ARCHIVED); + REQUIRE(bulkLoadResult3[0].archivedEntry() == archivedShadow); + }; + + testAllIndexTypes(f); +} } diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp index 2730efcca6..e4c4c1c1df 100644 --- a/src/bucket/test/BucketListTests.cpp +++ b/src/bucket/test/BucketListTests.cpp @@ -29,6 +29,8 @@ #include "util/Math.h" #include "util/ProtocolVersion.h" #include "util/Timer.h" +#include "util/UnorderedSet.h" +#include "xdr/Stellar-ledger.h" #include "xdrpp/autocheck.h" #include @@ -67,29 +69,29 @@ highBoundInclusive(uint32_t level, uint32_t ledger) } void -checkBucketSizeAndBounds(BucketList& bl, uint32_t ledgerSeq, uint32_t level, +checkBucketSizeAndBounds(LiveBucketList& bl, uint32_t ledgerSeq, uint32_t level, bool isCurr) { - std::shared_ptr bucket; + std::shared_ptr bucket; uint32_t sizeOfBucket = 0; uint32_t oldestLedger = 0; if (isCurr) { bucket = bl.getLevel(level).getCurr(); - sizeOfBucket = BucketList::sizeOfCurr(ledgerSeq, level); - oldestLedger = BucketList::oldestLedgerInCurr(ledgerSeq, level); + sizeOfBucket = LiveBucketList::sizeOfCurr(ledgerSeq, level); + oldestLedger = LiveBucketList::oldestLedgerInCurr(ledgerSeq, level); } else { bucket = bl.getLevel(level).getSnap(); - sizeOfBucket = BucketList::sizeOfSnap(ledgerSeq, level); - oldestLedger = BucketList::oldestLedgerInSnap(ledgerSeq, level); + sizeOfBucket = LiveBucketList::sizeOfSnap(ledgerSeq, level); + oldestLedger = LiveBucketList::oldestLedgerInSnap(ledgerSeq, level); } std::set ledgers; uint32_t lbound = std::numeric_limits::max(); uint32_t ubound = 0; - for (BucketInputIterator iter(bucket); iter; ++iter) + for (LiveBucketInputIterator iter(bucket); iter; ++iter) { auto lastModified = (*iter).liveEntry().lastModifiedLedgerSeq; ledgers.insert(lastModified); @@ -129,66 +131,104 @@ binarySearchForLedger(uint32_t lbound, uint32_t ubound, using namespace BucketListTests; -TEST_CASE_VERSIONS("bucket list", "[bucket][bucketlist]") +template +static void +basicBucketListTest() { VirtualClock clock; Config const& cfg = getTestConfig(); - try - { - for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { + + auto test = [&](Config const& cfg) { + try + { Application::pointer app = createTestApplication(clock, cfg); - BucketList bl; + BucketListT bl; CLOG_DEBUG(Bucket, "Adding batches to bucket list"); + + UnorderedSet seenKeys; for (uint32_t i = 1; !app->getClock().getIOContext().stopped() && i < 130; ++i) { app->getClock().crank(false); - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq = i; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, - LedgerTestUtils:: - generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 8), - LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( - {CONFIG_SETTING}, 5)); + if constexpr (std::is_same_v) + { + bl.addBatch( + *app, i, getAppLedgerVersion(app), {}, + LedgerTestUtils::generateValidUniqueLedgerEntries(8), + LedgerTestUtils:: + generateValidLedgerEntryKeysWithExclusions( + {CONFIG_SETTING}, 5)); + } + else + { + bl.addBatch( + *app, i, getAppLedgerVersion(app), {}, + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE, CONTRACT_DATA}, 8, seenKeys), + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE, CONTRACT_DATA}, 5, seenKeys)); + } + if (i % 10 == 0) CLOG_DEBUG(Bucket, "Added batch {}, 
hash={}", i, binToHex(bl.getHash())); - for (uint32_t j = 0; j < BucketList::kNumLevels; ++j) + for (uint32_t j = 0; j < BucketListT::kNumLevels; ++j) { auto const& lev = bl.getLevel(j); auto currSz = countEntries(lev.getCurr()); auto snapSz = countEntries(lev.getSnap()); - CHECK(currSz <= BucketList::levelHalf(j) * 100); - CHECK(snapSz <= BucketList::levelHalf(j) * 100); + CHECK(currSz <= BucketListT::levelHalf(j) * 100); + CHECK(snapSz <= BucketListT::levelHalf(j) * 100); } } - }); + } + catch (std::future_error& e) + { + CLOG_DEBUG(Bucket, "Test caught std::future_error {}: {}", + e.code().value(), e.what()); + REQUIRE(false); + } + }; + + if constexpr (std::is_same_v) + { + for_versions_with_differing_bucket_logic(cfg, test); } - catch (std::future_error& e) + else { - CLOG_DEBUG(Bucket, "Test caught std::future_error {}: {}", - e.code().value(), e.what()); - REQUIRE(false); + for_versions_from(23, cfg, test); } } -TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") +TEST_CASE_VERSIONS("bucket list", "[bucket][bucketlist]") +{ + SECTION("live bl") + { + basicBucketListTest(); + } + + SECTION("hot archive bl") + { + basicBucketListTest(); + } +} + +template +static void +updatePeriodTest() { std::map currCalculatedUpdatePeriods; std::map snapCalculatedUpdatePeriods; - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < BucketListT::kNumLevels; ++i) { currCalculatedUpdatePeriods.emplace( - i, BucketList::bucketUpdatePeriod(i, /*isCurr=*/true)); + i, BucketListT::bucketUpdatePeriod(i, /*isCurr=*/true)); // Last level has no snap - if (i != BucketList::kNumLevels - 1) + if (i != BucketListT::kNumLevels - 1) { snapCalculatedUpdatePeriods.emplace( - i, BucketList::bucketUpdatePeriod(i, /*isSnap=*/false)); + i, BucketListT::bucketUpdatePeriod(i, /*isSnap=*/false)); } } @@ -197,7 +237,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") !snapCalculatedUpdatePeriods.empty(); ++ledgerSeq) { - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListT::kNumLevels; ++level) { // Check if curr bucket is updated auto currIter = currCalculatedUpdatePeriods.find(level); @@ -213,7 +253,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") { // For all other levels, an update occurs when the level // above spills - if (BucketList::levelShouldSpill(ledgerSeq, level - 1)) + if (BucketListT::levelShouldSpill(ledgerSeq, level - 1)) { REQUIRE(currIter->second == ledgerSeq); currCalculatedUpdatePeriods.erase(currIter); @@ -225,7 +265,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") auto snapIter = snapCalculatedUpdatePeriods.find(level); if (snapIter != snapCalculatedUpdatePeriods.end()) { - if (BucketList::levelShouldSpill(ledgerSeq, level)) + if (BucketListT::levelShouldSpill(ledgerSeq, level)) { // Check that snap bucket calculation is correct REQUIRE(snapIter->second == ledgerSeq); @@ -236,6 +276,19 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") } } +TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]") +{ + SECTION("live bl") + { + updatePeriodTest(); + } + + SECTION("hot archive bl") + { + updatePeriodTest(); + } +} + TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", "[bucket][bucketlist]") { @@ -243,7 +296,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", Config const& cfg = getTestConfig(); for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { Application::pointer 
app = createTestApplication(clock, cfg); - BucketList bl; + LiveBucketList bl; // Alice and Bob change in every iteration. auto alice = LedgerTestUtils::generateValidAccountEntry(5); @@ -258,8 +311,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", { app->getClock().crank(false); auto liveBatch = - LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 5); + LedgerTestUtils::generateValidUniqueLedgerEntries(5); BucketEntry BucketEntryAlice, BucketEntryBob; alice.balance++; @@ -274,11 +326,8 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", BucketEntryBob.liveEntry().data.account() = bob; liveBatch.push_back(BucketEntryBob.liveEntry()); - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq = i; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, liveBatch, + bl.addBatch( + *app, i, getAppLedgerVersion(app), {}, liveBatch, LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5)); if (i % 100 == 0) @@ -304,7 +353,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", // Alice and Bob should never occur in level 2 .. N because they // were shadowed in level 0 continuously. - for (uint32_t j = 2; j < BucketList::kNumLevels; ++j) + for (uint32_t j = 2; j < LiveBucketList::kNumLevels; ++j) { auto const& lev = bl.getLevel(j); auto curr = lev.getCurr(); @@ -317,7 +366,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", snap->containsBucketIdentity(BucketEntryBob)); if (protocolVersionIsBefore( app->getConfig().LEDGER_PROTOCOL_VERSION, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED) || + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) || j > 5) { CHECK(!hasAlice); @@ -337,7 +386,74 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12", }); } -TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", +TEST_CASE_VERSIONS("hot archive bucket tombstones expire at bottom level", + "[bucket][bucketlist][tombstones]") +{ + VirtualClock clock; + Config const& cfg = getTestConfig(); + + testutil::BucketListDepthModifier bldm(5); + auto app = createTestApplication(clock, cfg); + for_versions_from(23, *app, [&] { + HotArchiveBucketList bl; + + auto lastSnapSize = [&] { + auto& level = bl.getLevel(HotArchiveBucketList::kNumLevels - 2); + return countEntries(level.getSnap()); + }; + + auto countNonBottomLevelEntries = [&] { + auto size = 0; + for (uint32_t i = 0; i < HotArchiveBucketList::kNumLevels - 1; ++i) + { + auto& level = bl.getLevel(i); + size += countEntries(level.getCurr()); + size += countEntries(level.getSnap()); + } + return size; + }; + + // Populate a BucketList so everything but the bottom level is full. 
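+        // Each batch below adds five keys that become HOT_ARCHIVE_LIVE
+        // entries and five that become HOT_ARCHIVE_DELETED entries, all
+        // unique. The LIVE markers play the tombstone role in the Hot
+        // Archive and are dropped by the bottom-level merge, so
+        // numExpectedEntries counts only the DELETED keys, which the loop at
+        // the end of the test verifies.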
+ UnorderedSet keys; + auto numExpectedEntries = 0; + auto ledger = 1; + while (lastSnapSize() == 0) + { + bl.addBatch(*app, ledger, getAppLedgerVersion(app), {}, + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE, CONTRACT_DATA}, 5, keys), + LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes( + {CONTRACT_CODE, CONTRACT_DATA}, 5, keys)); + + // Once all entries merge to the bottom level, only deleted entries + // should remain + numExpectedEntries += 5; + + ++ledger; + } + + // Close ledgers until all entries have merged into the bottom level + // bucket + while (countNonBottomLevelEntries() != 0) + { + bl.addBatch(*app, ledger, getAppLedgerVersion(app), {}, {}, {}); + ++ledger; + } + + auto bottomCurr = + bl.getLevel(HotArchiveBucketList::kNumLevels - 1).getCurr(); + REQUIRE(countEntries(bottomCurr) == numExpectedEntries); + + for (HotArchiveBucketInputIterator iter(bottomCurr); iter; ++iter) + { + auto be = *iter; + REQUIRE(be.type() == HOT_ARCHIVE_DELETED); + REQUIRE(keys.find(be.key()) != keys.end()); + } + }); +} + +TEST_CASE_VERSIONS("live bucket tombstones expire at bottom level", "[bucket][bucketlist][tombstones]") { VirtualClock clock; @@ -345,50 +461,43 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { Application::pointer app = createTestApplication(clock, cfg); - BucketList bl; + LiveBucketList bl; BucketManager& bm = app->getBucketManager(); auto& mergeTimer = bm.getMergeTimer(); CLOG_INFO(Bucket, "Establishing random bucketlist"); - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { auto& level = bl.getLevel(i); - level.setCurr(Bucket::fresh( + level.setCurr(LiveBucket::fresh( bm, getAppLedgerVersion(app), {}, - LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 8), + LedgerTestUtils::generateValidUniqueLedgerEntries(8), LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5), /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true)); - level.setSnap(Bucket::fresh( + level.setSnap(LiveBucket::fresh( bm, getAppLedgerVersion(app), {}, - LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 8), + LedgerTestUtils::generateValidUniqueLedgerEntries(8), LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5), /*countMergeEvents=*/true, clock.getIOContext(), /*doFsync=*/true)); } - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { - std::vector ledgers = {BucketList::levelHalf(i), - BucketList::levelSize(i)}; + std::vector ledgers = {LiveBucketList::levelHalf(i), + LiveBucketList::levelSize(i)}; for (auto j : ledgers) { auto n = mergeTimer.count(); - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq = j; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, - LedgerTestUtils:: - generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 8), + bl.addBatch( + *app, j, getAppLedgerVersion(app), {}, + LedgerTestUtils::generateValidUniqueLedgerEntries(8), LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5)); app->getClock().crank(false); - for (uint32_t k = 0u; k < BucketList::kNumLevels; ++k) + for (uint32_t k = 0u; k < LiveBucketList::kNumLevels; ++k) { auto& next = bl.getLevel(k).getNext(); if (next.isLive()) @@ -401,13 +510,13 @@ 
TEST_CASE_VERSIONS("bucket tombstones expire at bottom level", "Added batch at ledger {}, merges provoked: {}", j, n); REQUIRE(n > 0); - REQUIRE(n < 2 * BucketList::kNumLevels); + REQUIRE(n < 2 * LiveBucketList::kNumLevels); } } - EntryCounts e0(bl.getLevel(BucketList::kNumLevels - 3).getCurr()); - EntryCounts e1(bl.getLevel(BucketList::kNumLevels - 2).getCurr()); - EntryCounts e2(bl.getLevel(BucketList::kNumLevels - 1).getCurr()); + EntryCounts e0(bl.getLevel(LiveBucketList::kNumLevels - 3).getCurr()); + EntryCounts e1(bl.getLevel(LiveBucketList::kNumLevels - 2).getCurr()); + EntryCounts e2(bl.getLevel(LiveBucketList::kNumLevels - 1).getCurr()); REQUIRE(e0.nDead != 0); REQUIRE(e1.nDead != 0); REQUIRE(e2.nDead == 0); @@ -422,7 +531,8 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries", for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { Application::pointer app = createTestApplication(clock, cfg); - BucketList bl; + LiveBucketList bl; + auto vers = getAppLedgerVersion(app); autocheck::generator flip; std::deque entriesToModify; for (uint32_t i = 1; i < 512; ++i) @@ -458,13 +568,9 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries", deadEntries.push_back(LedgerEntryKey(e)); } } - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq = i; - addBatchAndUpdateSnapshot(bl, *app, lh, initEntries, liveEntries, - deadEntries); + bl.addBatch(*app, i, vers, initEntries, liveEntries, deadEntries); app->getClock().crank(false); - for (uint32_t k = 0u; k < BucketList::kNumLevels; ++k) + for (uint32_t k = 0u; k < LiveBucketList::kNumLevels; ++k) { auto& next = bl.getLevel(k).getNext(); if (next.isLive()) @@ -473,14 +579,15 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries", } } } - for (uint32_t k = 0u; k < BucketList::kNumLevels; ++k) + for (uint32_t k = 0u; k < LiveBucketList::kNumLevels; ++k) { auto const& lev = bl.getLevel(k); auto currSz = countEntries(lev.getCurr()); auto snapSz = countEntries(lev.getSnap()); if (protocolVersionStartsFrom( cfg.LEDGER_PROTOCOL_VERSION, - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + LiveBucket:: + FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) { // init/dead pairs should mutually-annihilate pretty readily as // they go, empirically this test peaks at buckets around 400 @@ -501,35 +608,29 @@ TEST_CASE_VERSIONS("single entry bubbling up", { for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { Application::pointer app = createTestApplication(clock, cfg); - BucketList bl; + LiveBucketList bl; std::vector emptySet; std::vector emptySetEntry; CLOG_DEBUG(Bucket, "Adding single entry in lowest level"); - addBatchAndUpdateSnapshot( - bl, *app, - app->getLedgerManager().getLastClosedLedgerHeader().header, {}, - LedgerTestUtils::generateValidLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 1), - emptySet); + bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, + LedgerTestUtils::generateValidLedgerEntries(1), + emptySet); CLOG_DEBUG(Bucket, "Adding empty batches to bucket list"); for (uint32_t i = 2; !app->getClock().getIOContext().stopped() && i < 300; ++i) { app->getClock().crank(false); - auto lh = - app->getLedgerManager().getLastClosedLedgerHeader().header; - lh.ledgerSeq = i; - addBatchAndUpdateSnapshot(bl, *app, lh, {}, emptySetEntry, - emptySet); + bl.addBatch(*app, i, getAppLedgerVersion(app), {}, + emptySetEntry, emptySet); if (i % 10 == 0) CLOG_DEBUG(Bucket, "Added batch {}, hash={}", i, 
binToHex(bl.getHash())); CLOG_DEBUG(Bucket, "------- ledger {}", i); - for (uint32_t j = 0; j <= BucketList::kNumLevels - 1; ++j) + for (uint32_t j = 0; j <= LiveBucketList::kNumLevels - 1; ++j) { uint32_t lb = lowBoundExclusive(j, i); uint32_t hb = highBoundInclusive(j, i); @@ -561,31 +662,32 @@ TEST_CASE_VERSIONS("single entry bubbling up", } } -TEST_CASE("BucketList sizeOf and oldestLedgerIn relations", - "[bucket][bucketlist][count]") +template +static void +sizeOfTests() { stellar::uniform_int_distribution dist; for (uint32_t i = 0; i < 1000; ++i) { - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < BucketListT::kNumLevels; ++level) { uint32_t ledger = dist(gRandomEngine); - if (BucketList::sizeOfSnap(ledger, level) > 0) + if (BucketListT::sizeOfSnap(ledger, level) > 0) { uint32_t oldestInCurr = - BucketList::oldestLedgerInSnap(ledger, level) + - BucketList::sizeOfSnap(ledger, level); + BucketListT::oldestLedgerInSnap(ledger, level) + + BucketListT::sizeOfSnap(ledger, level); REQUIRE(oldestInCurr == - BucketList::oldestLedgerInCurr(ledger, level)); + BucketListT::oldestLedgerInCurr(ledger, level)); } - if (BucketList::sizeOfCurr(ledger, level) > 0) + if (BucketListT::sizeOfCurr(ledger, level) > 0) { uint32_t newestInCurr = - BucketList::oldestLedgerInCurr(ledger, level) + - BucketList::sizeOfCurr(ledger, level) - 1; + BucketListT::oldestLedgerInCurr(ledger, level) + + BucketListT::sizeOfCurr(ledger, level) - 1; REQUIRE(newestInCurr == (level == 0 ? ledger - : BucketList::oldestLedgerInSnap( + : BucketListT::oldestLedgerInSnap( ledger, level - 1) - 1)); } @@ -593,13 +695,29 @@ TEST_CASE("BucketList sizeOf and oldestLedgerIn relations", } } -TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") +TEST_CASE("BucketList sizeOf and oldestLedgerIn relations", + "[bucket][bucketlist][count]") +{ + SECTION("live bl") + { + sizeOfTests(); + } + + SECTION("hot archive bl") + { + sizeOfTests(); + } +} + +template +static void +snapSteadyStateTest() { // Deliberately exclude deepest level since snap on the deepest level // is always empty. 
- for (uint32_t level = 0; level < BucketList::kNumLevels - 1; ++level) + for (uint32_t level = 0; level < BucketListT::kNumLevels - 1; ++level) { - uint32_t const half = BucketList::levelHalf(level); + uint32_t const half = BucketListT::levelHalf(level); // Use binary search (assuming that it does reach steady state) // to find the ledger where the snap at this level first reaches @@ -607,7 +725,7 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") uint32_t boundary = binarySearchForLedger( 1, std::numeric_limits::max() / 2, [level, half](uint32_t ledger) { - return (BucketList::sizeOfSnap(ledger, level) == half); + return (BucketListT::sizeOfSnap(ledger, level) == half); }); // Generate random ledgers above and below the split to test that @@ -618,21 +736,36 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") { uint32_t low = distLow(gRandomEngine); uint32_t high = distHigh(gRandomEngine); - REQUIRE(BucketList::sizeOfSnap(low, level) < half); - REQUIRE(BucketList::sizeOfSnap(high, level) == half); + REQUIRE(BucketListT::sizeOfSnap(low, level) < half); + REQUIRE(BucketListT::sizeOfSnap(high, level) == half); } } } -TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]") +TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]") +{ + SECTION("live bl") + { + snapSteadyStateTest(); + } + + SECTION("hot archive bl") + { + snapSteadyStateTest(); + } +} + +template +static void +deepestCurrTest() { - uint32_t const deepest = BucketList::kNumLevels - 1; + uint32_t const deepest = BucketListT::kNumLevels - 1; // Use binary search to find the first ledger where the deepest curr // first is non-empty. uint32_t boundary = binarySearchForLedger( 1, std::numeric_limits::max() / 2, [deepest](uint32_t ledger) { - return (BucketList::sizeOfCurr(ledger, deepest) > 0); + return (BucketListT::sizeOfCurr(ledger, deepest) > 0); }); stellar::uniform_int_distribution distLow(1, boundary - 1); stellar::uniform_int_distribution distHigh(boundary); @@ -640,29 +773,57 @@ TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]") { uint32_t low = distLow(gRandomEngine); uint32_t high = distHigh(gRandomEngine); - REQUIRE(BucketList::sizeOfCurr(low, deepest) == 0); - REQUIRE(BucketList::oldestLedgerInCurr(low, deepest) == + REQUIRE(BucketListT::sizeOfCurr(low, deepest) == 0); + REQUIRE(BucketListT::oldestLedgerInCurr(low, deepest) == std::numeric_limits::max()); - REQUIRE(BucketList::sizeOfCurr(high, deepest) > 0); - REQUIRE(BucketList::oldestLedgerInCurr(high, deepest) == 1); + REQUIRE(BucketListT::sizeOfCurr(high, deepest) > 0); + REQUIRE(BucketListT::oldestLedgerInCurr(high, deepest) == 1); - REQUIRE(BucketList::sizeOfSnap(low, deepest) == 0); - REQUIRE(BucketList::oldestLedgerInSnap(low, deepest) == + REQUIRE(BucketListT::sizeOfSnap(low, deepest) == 0); + REQUIRE(BucketListT::oldestLedgerInSnap(low, deepest) == std::numeric_limits::max()); - REQUIRE(BucketList::sizeOfSnap(high, deepest) == 0); - REQUIRE(BucketList::oldestLedgerInSnap(high, deepest) == + REQUIRE(BucketListT::sizeOfSnap(high, deepest) == 0); + REQUIRE(BucketListT::oldestLedgerInSnap(high, deepest) == std::numeric_limits::max()); } } +TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]") +{ + SECTION("live bl") + { + deepestCurrTest(); + } + + SECTION("hot archive bl") + { + deepestCurrTest(); + } +} + +template +static void +blSizesAtLedger1Test() +{ + REQUIRE(BucketListT::sizeOfCurr(1, 0) == 
+template <class BucketListT>
+static void
+deepestCurrTest()
 {
-    uint32_t const deepest = BucketList::kNumLevels - 1;
+    uint32_t const deepest = BucketListT::kNumLevels - 1;
     // Use binary search to find the first ledger where the deepest curr
     // first is non-empty.
     uint32_t boundary = binarySearchForLedger(
         1, std::numeric_limits<uint32_t>::max() / 2,
         [deepest](uint32_t ledger) {
-            return (BucketList::sizeOfCurr(ledger, deepest) > 0);
+            return (BucketListT::sizeOfCurr(ledger, deepest) > 0);
         });
     stellar::uniform_int_distribution<uint32_t> distLow(1, boundary - 1);
     stellar::uniform_int_distribution<uint32_t> distHigh(boundary);
@@ -640,29 +773,57 @@ TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]")
     {
         uint32_t low = distLow(gRandomEngine);
         uint32_t high = distHigh(gRandomEngine);
-        REQUIRE(BucketList::sizeOfCurr(low, deepest) == 0);
-        REQUIRE(BucketList::oldestLedgerInCurr(low, deepest) ==
+        REQUIRE(BucketListT::sizeOfCurr(low, deepest) == 0);
+        REQUIRE(BucketListT::oldestLedgerInCurr(low, deepest) ==
                 std::numeric_limits<uint32_t>::max());
-        REQUIRE(BucketList::sizeOfCurr(high, deepest) > 0);
-        REQUIRE(BucketList::oldestLedgerInCurr(high, deepest) == 1);
+        REQUIRE(BucketListT::sizeOfCurr(high, deepest) > 0);
+        REQUIRE(BucketListT::oldestLedgerInCurr(high, deepest) == 1);
 
-        REQUIRE(BucketList::sizeOfSnap(low, deepest) == 0);
-        REQUIRE(BucketList::oldestLedgerInSnap(low, deepest) ==
+        REQUIRE(BucketListT::sizeOfSnap(low, deepest) == 0);
+        REQUIRE(BucketListT::oldestLedgerInSnap(low, deepest) ==
                 std::numeric_limits<uint32_t>::max());
-        REQUIRE(BucketList::sizeOfSnap(high, deepest) == 0);
-        REQUIRE(BucketList::oldestLedgerInSnap(high, deepest) ==
+        REQUIRE(BucketListT::sizeOfSnap(high, deepest) == 0);
+        REQUIRE(BucketListT::oldestLedgerInSnap(high, deepest) ==
                 std::numeric_limits<uint32_t>::max());
     }
 }
 
+TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]")
+{
+    SECTION("live bl")
+    {
+        deepestCurrTest<LiveBucketList>();
+    }
+
+    SECTION("hot archive bl")
+    {
+        deepestCurrTest<HotArchiveBucketList>();
+    }
+}
+
+template <class BucketListT>
+static void
+blSizesAtLedger1Test()
+{
+    REQUIRE(BucketListT::sizeOfCurr(1, 0) == 1);
+    REQUIRE(BucketListT::sizeOfSnap(1, 0) == 0);
+    for (uint32_t level = 1; level < BucketListT::kNumLevels; ++level)
+    {
+        REQUIRE(BucketListT::sizeOfCurr(1, level) == 0);
+        REQUIRE(BucketListT::sizeOfSnap(1, level) == 0);
+    }
+}
+
 TEST_CASE("BucketList sizes at ledger 1", "[bucket][bucketlist][count]")
 {
-    REQUIRE(BucketList::sizeOfCurr(1, 0) == 1);
-    REQUIRE(BucketList::sizeOfSnap(1, 0) == 0);
-    for (uint32_t level = 1; level < BucketList::kNumLevels; ++level)
+    SECTION("live bl")
     {
-        REQUIRE(BucketList::sizeOfCurr(1, level) == 0);
-        REQUIRE(BucketList::sizeOfSnap(1, level) == 0);
+        blSizesAtLedger1Test<LiveBucketList>();
+    }
+
+    SECTION("hot archive bl")
+    {
+        blSizesAtLedger1Test<HotArchiveBucketList>();
     }
 }
 
@@ -671,7 +832,7 @@ TEST_CASE("BucketList check bucket sizes", "[bucket][bucketlist][count]")
     VirtualClock clock;
     Config cfg(getTestConfig());
     Application::pointer app = createTestApplication(clock, cfg);
-    BucketList& bl = app->getBucketManager().getBucketList();
+    LiveBucketList& bl = app->getBucketManager().getLiveBucketList();
     std::vector<LedgerKey> emptySet;
     auto ledgers =
         LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
@@ -685,10 +846,10 @@ TEST_CASE("BucketList check bucket sizes", "[bucket][bucketlist][count]")
         auto lh =
             app->getLedgerManager().getLastClosedLedgerHeader().header;
         lh.ledgerSeq = ledgerSeq;
-        addBatchAndUpdateSnapshot(bl, *app, lh, {},
-                                  {ledgers[ledgerSeq - 1]}, emptySet);
+        addLiveBatchAndUpdateSnapshot(*app, lh, {},
+                                      {ledgers[ledgerSeq - 1]}, emptySet);
     }
-    for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+    for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
     {
         checkBucketSizeAndBounds(bl, ledgerSeq, level, true);
         checkBucketSizeAndBounds(bl, ledgerSeq, level, false);
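Note: for readers following the `addBatchAndUpdateSnapshot` to `addLiveBatchAndUpdateSnapshot` rename exercised in the hunk above, here is a hedged usage sketch; the application pointer and entry values are placeholders, not taken from this patch:

```cpp
// Close ten artificial ledgers, pushing one live entry into the live
// BucketList per ledger and refreshing the read snapshot each time.
auto lh = app->getLedgerManager().getLastClosedLedgerHeader().header;
for (uint32_t i = 0; i < 10; ++i)
{
    ++lh.ledgerSeq;
    auto e = LedgerTestUtils::generateValidLedgerEntryWithExclusions(
        {CONFIG_SETTING});
    addLiveBatchAndUpdateSnapshot(*app, lh, /*initEntries=*/{},
                                  /*liveEntries=*/{e}, /*deadEntries=*/{});
}
```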
@@ -699,7 +860,7 @@ TEST_CASE_VERSIONS("network config snapshots BucketList size", "[bucketlist]")
 {
     VirtualClock clock;
-    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
     cfg.USE_CONFIG_FOR_GENESIS = true;
 
     auto app = createTestApplication(clock, cfg);
@@ -767,7 +928,7 @@ TEST_CASE_VERSIONS("network config snapshots BucketList size", "[bucketlist]")
         {
             correctWindow.pop_front();
             correctWindow.push_back(
-                app->getBucketManager().getBucketList().getSize());
+                app->getBucketManager().getLiveBucketList().getSize());
         }
 
         lm.setNextLedgerEntryBatchForBucketTesting(
@@ -786,347 +947,442 @@ TEST_CASE_VERSIONS("network config snapshots BucketList size", "[bucketlist]")
     });
 }
 
-TEST_CASE_VERSIONS("eviction scan", "[bucketlist]")
+TEST_CASE_VERSIONS("eviction scan", "[bucketlist][archival]")
 {
     VirtualClock clock;
     Config cfg(getTestConfig());
     cfg.USE_CONFIG_FOR_GENESIS = true;
 
-    auto test = [&](bool backgroundScan) {
-        // BucketTestApplication writes directly to BL and circumvents LedgerTxn
-        // interface, so we have to use BucketListDB for lookups
-        cfg.DEPRECATED_SQL_LEDGER_STATE = false;
-        cfg.BACKGROUND_EVICTION_SCAN = backgroundScan;
-
+    auto test = [&](Config& cfg) {
         auto app = createTestApplication(clock, cfg);
-        for_versions_from(20, *app, [&] {
-            LedgerManagerForBucketTests& lm = app->getLedgerManager();
-            auto& bm = app->getBucketManager();
-            auto& bl = bm.getBucketList();
-
-            auto& networkCfg = [&]() -> SorobanNetworkConfig& {
-                LedgerTxn ltx(app->getLedgerTxnRoot());
-                return app->getLedgerManager().getMutableSorobanNetworkConfig();
-            }();
-
-            auto& stateArchivalSettings = networkCfg.stateArchivalSettings();
-            auto& evictionIter = networkCfg.evictionIterator();
-            auto const levelToScan = 3;
-            uint32_t ledgerSeq = 1;
-
-            stateArchivalSettings.minTemporaryTTL = 1;
-            stateArchivalSettings.minPersistentTTL = 1;
-
-            // Because this test uses BucketTestApplication, we must manually
-            // add the Network Config LedgerEntries to the BucketList with
-            // setNextLedgerEntryBatchForBucketTesting whenever state archival
-            // settings or the eviction iterator is manually changed
-            auto getNetworkCfgLE = [&] {
-                std::vector<LedgerEntry> result;
-                LedgerEntry sesLE;
-                sesLE.data.type(CONFIG_SETTING);
-                sesLE.data.configSetting().configSettingID(
-                    ConfigSettingID::CONFIG_SETTING_STATE_ARCHIVAL);
-                sesLE.data.configSetting().stateArchivalSettings() =
-                    stateArchivalSettings;
-                result.emplace_back(sesLE);
-
-                LedgerEntry iterLE;
-                iterLE.data.type(CONFIG_SETTING);
-                iterLE.data.configSetting().configSettingID(
-                    ConfigSettingID::CONFIG_SETTING_EVICTION_ITERATOR);
-                iterLE.data.configSetting().evictionIterator() = evictionIter;
-                result.emplace_back(iterLE);
-
-                return result;
-            };
-            auto updateNetworkCfg = [&] {
-                lm.setNextLedgerEntryBatchForBucketTesting(
-                    {}, getNetworkCfgLE(), {});
-                closeLedger(*app);
-                ++ledgerSeq;
-            };
+        bool tempOnly = protocolVersionIsBefore(
+            cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION,
+            Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION);
 
-            auto checkIfEntryExists = [&](std::set<LedgerKey> const& keys,
-                                          bool shouldExist) {
-                LedgerTxn ltx(app->getLedgerTxnRoot());
-                for (auto const& key : keys)
-                {
-                    auto txle = ltx.loadWithoutRecord(key);
-                    REQUIRE(static_cast<bool>(txle) == shouldExist);
+        LedgerManagerForBucketTests& lm = app->getLedgerManager();
+        auto& bm = app->getBucketManager();
+        auto& bl = bm.getLiveBucketList();
+
+        auto& networkCfg = [&]() -> SorobanNetworkConfig& {
+            LedgerTxn ltx(app->getLedgerTxnRoot());
+            return app->getLedgerManager().getMutableSorobanNetworkConfig();
+        }();
+
+        auto& stateArchivalSettings = networkCfg.stateArchivalSettings();
+        auto& evictionIter = networkCfg.evictionIterator();
+        auto const levelToScan = 3;
+        uint32_t ledgerSeq = 1;
+
+        stateArchivalSettings.minTemporaryTTL = 1;
+        stateArchivalSettings.minPersistentTTL = 1;
+
+        // Because this test uses BucketTestApplication, we must manually
+        // add the Network Config LedgerEntries to the BucketList with
+        // setNextLedgerEntryBatchForBucketTesting whenever state archival
+        // settings or the eviction iterator is manually changed
+        auto getNetworkCfgLE = [&] {
+            std::vector<LedgerEntry> result;
+            LedgerEntry sesLE;
+            sesLE.data.type(CONFIG_SETTING);
+            sesLE.data.configSetting().configSettingID(
+                ConfigSettingID::CONFIG_SETTING_STATE_ARCHIVAL);
+            sesLE.data.configSetting().stateArchivalSettings() =
+                stateArchivalSettings;
+            result.emplace_back(sesLE);
+
+            LedgerEntry iterLE;
+            iterLE.data.type(CONFIG_SETTING);
+            iterLE.data.configSetting().configSettingID(
+                ConfigSettingID::CONFIG_SETTING_EVICTION_ITERATOR);
+            iterLE.data.configSetting().evictionIterator() = evictionIter;
+            result.emplace_back(iterLE);
+
+            return result;
+        };
+
+        auto updateNetworkCfg = [&] {
+            lm.setNextLedgerEntryBatchForBucketTesting({}, getNetworkCfgLE(),
+                                                       {});
+            closeLedger(*app);
+            ++ledgerSeq;
+        };
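Note: the SECTIONs further down assume the two helpers just defined. A typical call sequence, shown here as an illustrative sketch rather than additional patch content:

```cpp
// Point the eviction scan at the level holding our entries, then publish
// the modified CONFIG_SETTING entries; updateNetworkCfg() closes a ledger
// and advances ledgerSeq as a side effect.
stateArchivalSettings.startingEvictionScanLevel = levelToScan;
updateNetworkCfg();
```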
+        auto checkIfEntryExists = [&](std::set<LedgerKey> const& keys,
+                                      bool shouldExist) {
+            LedgerTxn ltx(app->getLedgerTxnRoot());
+            for (auto const& key : keys)
             {
-                // Set half of the entries to be persistent, half temporary
-                if (tempEntries.empty() || rand_flip())
-                {
-                    e.data.contractData().durability = TEMPORARY;
-                    tempEntries.emplace(LedgerEntryKey(e));
-                }
-                else
-                {
-                    e.data.contractData().durability = PERSISTENT;
-                    persistentEntries.emplace(LedgerEntryKey(e));
-                }
-
-                LedgerEntry TTLEntry;
-                TTLEntry.data.type(TTL);
-                TTLEntry.data.ttl().keyHash = getTTLKey(e).ttl().keyHash;
-                TTLEntry.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1;
+                auto txle = ltx.loadWithoutRecord(key);
+                REQUIRE(static_cast<bool>(txle) == shouldExist);
 
-                entries.emplace_back(e);
-                entries.emplace_back(TTLEntry);
+                auto TTLTxle = ltx.loadWithoutRecord(getTTLKey(key));
+                REQUIRE(static_cast<bool>(TTLTxle) == shouldExist);
             }
+        };
 
-            lm.setNextLedgerEntryBatchForBucketTesting(entries,
-                                                       getNetworkCfgLE(), {});
-            closeLedger(*app);
-            ++ledgerSeq;
+        std::set<LedgerKey> tempEntries;
+        std::set<LedgerKey> persistentEntries;
+        std::vector<LedgerEntry> entries;
 
-            // Iterate until entries reach the level where eviction will start
-            for (; bl.getLevel(levelToScan).getCurr()->isEmpty(); ++ledgerSeq)
+        for (auto& e :
+             LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
+                 {CONTRACT_DATA, CONTRACT_CODE}, 50))
+        {
+            if (e.data.type() == CONTRACT_CODE)
             {
-                checkIfEntryExists(tempEntries, true);
-                checkIfEntryExists(persistentEntries, true);
-                lm.setNextLedgerEntryBatchForBucketTesting({}, {}, {});
-                closeLedger(*app);
+                persistentEntries.emplace(LedgerEntryKey(e));
             }
 
-            SECTION("basic eviction test")
+            // Set half of the contract data entries to be persistent, half
+            // temporary
+            else if (tempEntries.empty() || rand_flip())
             {
-                // Set eviction to start at level where the entries
-                // currently are
-                stateArchivalSettings.startingEvictionScanLevel = levelToScan;
-                updateNetworkCfg();
+                e.data.contractData().durability = TEMPORARY;
+                tempEntries.emplace(LedgerEntryKey(e));
+            }
+            else
+            {
+                e.data.contractData().durability = PERSISTENT;
+                persistentEntries.emplace(LedgerEntryKey(e));
+            }
 
-                // All entries should be evicted at once
-                closeLedger(*app);
-                ++ledgerSeq;
-                checkIfEntryExists(tempEntries, false);
-                checkIfEntryExists(persistentEntries, true);
-
-                auto& entriesEvictedCounter = bm.getEntriesEvictedCounter();
-                REQUIRE(entriesEvictedCounter.count() == tempEntries.size());
-
-                // Close ledgers until evicted DEADENTRYs merge with
-                // original INITENTRYs. 
This checks that BucketList - // invariants are respected - for (auto initialDeadMerges = - bm.readMergeCounters() - .mOldInitEntriesMergedWithNewDead; - bm.readMergeCounters().mOldInitEntriesMergedWithNewDead < - initialDeadMerges + tempEntries.size(); - ++ledgerSeq) - { - closeLedger(*app); - } + LedgerEntry TTLEntry; + TTLEntry.data.type(TTL); + TTLEntry.data.ttl().keyHash = getTTLKey(e).ttl().keyHash; + TTLEntry.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1; - REQUIRE(entriesEvictedCounter.count() == tempEntries.size()); - } + entries.emplace_back(e); + entries.emplace_back(TTLEntry); + } - SECTION("shadowed entries not evicted") + lm.setNextLedgerEntryBatchForBucketTesting(entries, getNetworkCfgLE(), + {}); + closeLedger(*app); + ++ledgerSeq; + + // Iterate until entries reach the level where eviction will start + for (; bl.getLevel(levelToScan).getCurr()->isEmpty(); ++ledgerSeq) + { + checkIfEntryExists(tempEntries, true); + checkIfEntryExists(persistentEntries, true); + lm.setNextLedgerEntryBatchForBucketTesting({}, {}, {}); + closeLedger(*app); + } + + auto expectedEvictions = tempEntries.size(); + + if (!tempOnly) + { + expectedEvictions += persistentEntries.size(); + } + + auto checkArchivedBucketList = [&] { + if (!tempOnly) { - // Set eviction to start at level where the entries - // currently are - stateArchivalSettings.startingEvictionScanLevel = levelToScan; - updateNetworkCfg(); + auto archiveSnapshot = + bm.getBucketSnapshotManager() + .copySearchableHotArchiveBucketListSnapshot(); - // Shadow non-live entries with updated, live versions - for (auto& e : entries) + // Check that persisted entries have been inserted into + // HotArchive + for (auto const& k : persistentEntries) { - // Only need to update TTLEntries - if (e.data.type() == TTL) + auto archivedEntry = archiveSnapshot->load(k); + REQUIRE(archivedEntry); + + auto seen = false; + for (auto const& e : entries) { - e.data.ttl().liveUntilLedgerSeq = ledgerSeq + 10; + if (e == archivedEntry->archivedEntry()) + { + seen = true; + break; + } } - } - lm.setNextLedgerEntryBatchForBucketTesting({}, entries, {}); + REQUIRE(seen); - // Close two ledgers to give eviction scan opportunity to - // process new entries - closeLedger(*app); - closeLedger(*app); + // Make sure TTL keys are not archived + auto ttl = getTTLKey(k); + auto archivedTTL = archiveSnapshot->load(ttl); + REQUIRE(!archivedTTL); + } - // Entries are shadowed, should not be evicted - checkIfEntryExists(tempEntries, true); - checkIfEntryExists(persistentEntries, true); + // Temp entries should not be archived + for (auto const& k : tempEntries) + { + auto archivedEntry = archiveSnapshot->load(k); + REQUIRE(!archivedEntry); + } } + }; - SECTION("maxEntriesToArchive") + SECTION("basic eviction test") + { + // Set eviction to start at level where the entries + // currently are + stateArchivalSettings.startingEvictionScanLevel = levelToScan; + updateNetworkCfg(); + + // All entries should be evicted at once + closeLedger(*app); + ++ledgerSeq; + checkIfEntryExists(tempEntries, false); + checkIfEntryExists(persistentEntries, tempOnly); + + auto& entriesEvictedCounter = bm.getEntriesEvictedCounter(); + + REQUIRE(entriesEvictedCounter.count() == expectedEvictions); + checkArchivedBucketList(); + + // Close ledgers until evicted DEADENTRYs merge with + // original INITENTRYs. 
This checks that BucketList + // invariants are respected + for (auto initialDeadMerges = + bm.readMergeCounters().mOldInitEntriesMergedWithNewDead; + bm.readMergeCounters().mOldInitEntriesMergedWithNewDead < + initialDeadMerges + tempEntries.size(); + ++ledgerSeq) { - // Check that we only evict one entry at a time - stateArchivalSettings.maxEntriesToArchive = 1; - stateArchivalSettings.startingEvictionScanLevel = levelToScan; - updateNetworkCfg(); + closeLedger(*app); + } - auto& entriesEvictedCounter = bm.getEntriesEvictedCounter(); - auto prevIter = evictionIter; - for (auto prevCount = entriesEvictedCounter.count(); - prevCount < tempEntries.size();) - { - closeLedger(*app); + REQUIRE(entriesEvictedCounter.count() == expectedEvictions); + } + + SECTION("shadowed entries not evicted") + { + // Set eviction to start at level where the entries + // currently are + stateArchivalSettings.startingEvictionScanLevel = levelToScan; + updateNetworkCfg(); - // Make sure we evict all entries without circling back - // through the BucketList - auto didAdvance = - prevIter.bucketFileOffset < - evictionIter.bucketFileOffset || - prevIter.bucketListLevel < - evictionIter.bucketListLevel || - // assert isCurrBucket goes from true -> false - // true > false == 1 > 0 - prevIter.isCurrBucket > evictionIter.isCurrBucket; - REQUIRE(didAdvance); - - // Check that we only evict at most maxEntriesToArchive - // per ledger - auto newCount = entriesEvictedCounter.count(); - REQUIRE( - (newCount == prevCount || newCount == prevCount + 1)); - prevCount = newCount; + // Shadow non-live entries with updated, live versions + for (auto& e : entries) + { + // Only need to update TTLEntries + if (e.data.type() == TTL) + { + e.data.ttl().liveUntilLedgerSeq = ledgerSeq + 10; } + } + lm.setNextLedgerEntryBatchForBucketTesting({}, entries, {}); - // All entries should have been evicted - checkIfEntryExists(tempEntries, false); - checkIfEntryExists(persistentEntries, true); + // Close two ledgers to give eviction scan opportunity to + // process new entries + closeLedger(*app); + closeLedger(*app); + + // Entries are shadowed, should not be evicted + checkIfEntryExists(tempEntries, true); + checkIfEntryExists(persistentEntries, true); + } + + SECTION("maxEntriesToArchive") + { + // Check that we only evict one entry at a time + stateArchivalSettings.maxEntriesToArchive = 1; + stateArchivalSettings.startingEvictionScanLevel = levelToScan; + updateNetworkCfg(); + + auto& entriesEvictedCounter = bm.getEntriesEvictedCounter(); + auto prevIter = evictionIter; + for (auto prevCount = entriesEvictedCounter.count(); + prevCount < expectedEvictions;) + { + closeLedger(*app); + + // Make sure we evict all entries without circling back + // through the BucketList + auto didAdvance = + prevIter.bucketFileOffset < evictionIter.bucketFileOffset || + prevIter.bucketListLevel < evictionIter.bucketListLevel || + // assert isCurrBucket goes from true -> false + // true > false == 1 > 0 + prevIter.isCurrBucket > evictionIter.isCurrBucket; + REQUIRE(didAdvance); + + // Check that we only evict at most maxEntriesToArchive + // per ledger + auto newCount = entriesEvictedCounter.count(); + REQUIRE((newCount == prevCount || newCount == prevCount + 1)); + prevCount = newCount; } - SECTION( - "maxEntriesToArchive with entry modified on eviction ledger") + // All entries should have been evicted + checkIfEntryExists(tempEntries, false); + checkIfEntryExists(persistentEntries, tempOnly); + checkArchivedBucketList(); + } + + 
SECTION("maxEntriesToArchive with entry modified on eviction ledger") + { + // This test is for an edge case in background eviction. + // We want to test that if entry n should be the last entry + // evicted due to maxEntriesToArchive, but that entry is + // updated on the eviction ledger, background eviction + // should still evict entry n + 1 + stateArchivalSettings.maxEntriesToArchive = 1; + stateArchivalSettings.startingEvictionScanLevel = levelToScan; + updateNetworkCfg(); + + // First temp entry in Bucket will be updated with live TTL + std::optional entryToUpdate{}; + + // Second temp entry in bucket should be evicted + LedgerKey entryToEvict; + std::optional expectedEndIterPosition{}; + + auto willBeEvicited = [&](LedgerEntry const& le) { + if (tempOnly) + { + return isTemporaryEntry(le.data); + } + else + { + return isSorobanEntry(le.data); + } + }; + + for (LiveBucketInputIterator in(bl.getLevel(levelToScan).getCurr()); + in; ++in) { - if (backgroundScan) + auto be = *in; + if (be.type() == INITENTRY || be.type() == LIVEENTRY) { - // This test is for an edge case in background eviction. - // We want to test that if entry n should be the last entry - // evicted due to maxEntriesToArchive, but that entry is - // updated on the eviction ledger, background eviction - // should still evict entry n + 1 - stateArchivalSettings.maxEntriesToArchive = 1; - stateArchivalSettings.startingEvictionScanLevel = - levelToScan; - updateNetworkCfg(); - - // First temp entry in Bucket will be updated with live TTL - std::optional entryToUpdate{}; - - // Second temp entry in bucket should be evicted - LedgerKey entryToEvict; - std::optional expectedEndIterPosition{}; - - for (BucketInputIterator in( - bl.getLevel(levelToScan).getCurr()); - in; ++in) + auto le = be.liveEntry(); + if (willBeEvicited(le)) { - // Temp entries should be sorted before persistent in - // the Bucket - auto be = *in; - if (be.type() == INITENTRY || be.type() == LIVEENTRY) + if (!entryToUpdate) + { + entryToUpdate = LedgerEntryKey(le); + } + else { - auto le = be.liveEntry(); - if (le.data.type() == CONTRACT_DATA && - le.data.contractData().durability == TEMPORARY) - { - if (!entryToUpdate) - { - entryToUpdate = LedgerEntryKey(le); - } - else - { - entryToEvict = LedgerEntryKey(le); - expectedEndIterPosition = in.pos(); - break; - } - } + entryToEvict = LedgerEntryKey(le); + expectedEndIterPosition = in.pos(); + break; } } + } + } - REQUIRE(expectedEndIterPosition.has_value()); + REQUIRE(expectedEndIterPosition.has_value()); - // Update first evictable entry with new TTL - auto ttlKey = getTTLKey(*entryToUpdate); - LedgerEntry ttlLe; - ttlLe.data.type(TTL); - ttlLe.data.ttl().keyHash = ttlKey.ttl().keyHash; - ttlLe.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1; + // Update first evictable entry with new TTL + auto ttlKey = getTTLKey(*entryToUpdate); + LedgerEntry ttlLe; + ttlLe.data.type(TTL); + ttlLe.data.ttl().keyHash = ttlKey.ttl().keyHash; + ttlLe.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1; - lm.setNextLedgerEntryBatchForBucketTesting({}, {ttlLe}, {}); - closeLedger(*app); + lm.setNextLedgerEntryBatchForBucketTesting({}, {ttlLe}, {}); + closeLedger(*app); - LedgerTxn ltx(app->getLedgerTxnRoot()); - auto firstEntry = ltx.loadWithoutRecord(*entryToUpdate); - REQUIRE(static_cast(firstEntry)); + LedgerTxn ltx(app->getLedgerTxnRoot()); + auto firstEntry = ltx.loadWithoutRecord(*entryToUpdate); + REQUIRE(static_cast(firstEntry)); - auto evictedEntry = ltx.loadWithoutRecord(entryToEvict); - REQUIRE(!static_cast(evictedEntry)); 
+ auto evictedEntry = ltx.loadWithoutRecord(entryToEvict); + REQUIRE(!static_cast(evictedEntry)); - REQUIRE(evictionIter.bucketFileOffset == - *expectedEndIterPosition); - REQUIRE(evictionIter.bucketListLevel == levelToScan); - REQUIRE(evictionIter.isCurrBucket == true); - } - } + REQUIRE(evictionIter.bucketFileOffset == *expectedEndIterPosition); + REQUIRE(evictionIter.bucketListLevel == levelToScan); + REQUIRE(evictionIter.isCurrBucket == true); + } - auto constexpr xdrOverheadBytes = 4; + auto constexpr xdrOverheadBytes = 4; - BucketInputIterator metaIn(bl.getLevel(0).getCurr()); - BucketEntry be(METAENTRY); - be.metaEntry() = metaIn.getMetadata(); - auto const metadataSize = xdr::xdr_size(be) + xdrOverheadBytes; + LiveBucketInputIterator metaIn(bl.getLevel(0).getCurr()); + BucketEntry be(METAENTRY); + be.metaEntry() = metaIn.getMetadata(); + auto const metadataSize = xdr::xdr_size(be) + xdrOverheadBytes; - SECTION("evictionScanSize") - { - // Set smallest possible scan size so eviction iterator - // scans one entry per scan - stateArchivalSettings.evictionScanSize = 1; - stateArchivalSettings.startingEvictionScanLevel = levelToScan; - updateNetworkCfg(); + SECTION("evictionScanSize") + { + // Set smallest possible scan size so eviction iterator + // scans one entry per scan + stateArchivalSettings.evictionScanSize = 1; + stateArchivalSettings.startingEvictionScanLevel = levelToScan; + updateNetworkCfg(); - // First eviction scan will only read meta + // First eviction scan will only read meta + closeLedger(*app); + ++ledgerSeq; + + REQUIRE(evictionIter.bucketFileOffset == metadataSize); + REQUIRE(evictionIter.bucketListLevel == levelToScan); + REQUIRE(evictionIter.isCurrBucket == true); + + size_t prevOff = evictionIter.bucketFileOffset; + // Check that each scan only reads one entry + for (LiveBucketInputIterator in(bl.getLevel(levelToScan).getCurr()); + in; ++in) + { + auto startingOffset = evictionIter.bucketFileOffset; closeLedger(*app); ++ledgerSeq; - REQUIRE(evictionIter.bucketFileOffset == metadataSize); - REQUIRE(evictionIter.bucketListLevel == levelToScan); - REQUIRE(evictionIter.isCurrBucket == true); - - size_t prevOff = evictionIter.bucketFileOffset; - // Check that each scan only reads one entry - for (BucketInputIterator in(bl.getLevel(levelToScan).getCurr()); - in; ++in) + // If the BL receives an incoming merge, the scan will + // reset; break at that point. + if (evictionIter.bucketFileOffset < prevOff) { - auto startingOffset = evictionIter.bucketFileOffset; - closeLedger(*app); - ++ledgerSeq; - - // If the BL receives an incoming merge, the scan will - // reset; break at that point. 
- if (evictionIter.bucketFileOffset < prevOff) - { - break; - } - prevOff = evictionIter.bucketFileOffset; - REQUIRE(evictionIter.bucketFileOffset == - xdr::xdr_size(*in) + startingOffset + - xdrOverheadBytes); - REQUIRE(evictionIter.bucketListLevel == levelToScan); - REQUIRE(evictionIter.isCurrBucket == true); + break; } + prevOff = evictionIter.bucketFileOffset; + REQUIRE(evictionIter.bucketFileOffset == + xdr::xdr_size(*in) + startingOffset + xdrOverheadBytes); + REQUIRE(evictionIter.bucketListLevel == levelToScan); + REQUIRE(evictionIter.isCurrBucket == true); } + } - SECTION("scans across multiple buckets") + SECTION("scans across multiple buckets") + { + for (; bl.getLevel(2).getSnap()->getSize() < 1'000; ++ledgerSeq) { - for (; bl.getLevel(2).getSnap()->getSize() < 1'000; ++ledgerSeq) + lm.setNextLedgerEntryBatchForBucketTesting( + {}, + LedgerTestUtils::generateValidLedgerEntriesWithExclusions( + {CONFIG_SETTING, CONTRACT_DATA, CONTRACT_CODE}, 10), + {}); + closeLedger(*app); + } + + // Reset iterator to level 2 curr bucket that we just populated + stateArchivalSettings.startingEvictionScanLevel = 2; + + // Scan size should scan all of curr bucket and one entry in + // snap per scan + stateArchivalSettings.evictionScanSize = + bl.getLevel(2).getCurr()->getSize() + 1; + + // Reset iterator + evictionIter.bucketFileOffset = 0; + evictionIter.bucketListLevel = 2; + evictionIter.isCurrBucket = true; + updateNetworkCfg(); + + closeLedger(*app); + ++ledgerSeq; + + // Iter should have advanced to snap and read first entry only + REQUIRE(evictionIter.bucketFileOffset == metadataSize); + REQUIRE(evictionIter.bucketListLevel == 2); + REQUIRE(evictionIter.isCurrBucket == false); + } + + SECTION("iterator resets when bucket changes") + { + auto testIterReset = [&](bool isCurr) { + auto const levelToTest = 1; + auto bucket = [&]() { + return isCurr ? bl.getLevel(levelToTest).getCurr() + : bl.getLevel(levelToTest).getSnap(); + }; + + // Iterate until entries spill into level 1 bucket + for (; bucket()->getSize() < 1'000; ++ledgerSeq) { lm.setNextLedgerEntryBatchForBucketTesting( {}, @@ -1138,134 +1394,78 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]") closeLedger(*app); } - // Reset iterator to level 2 curr bucket that we just populated - stateArchivalSettings.startingEvictionScanLevel = 2; - - // Scan size should scan all of curr bucket and one entry in - // snap per scan - stateArchivalSettings.evictionScanSize = - bl.getLevel(2).getCurr()->getSize() + 1; + // Scan meta entry + one other entry in initial scan + stateArchivalSettings.evictionScanSize = metadataSize + 1; - // Reset iterator + // Reset eviction iter start of bucket being tested + stateArchivalSettings.startingEvictionScanLevel = levelToTest; evictionIter.bucketFileOffset = 0; - evictionIter.bucketListLevel = 2; - evictionIter.isCurrBucket = true; + evictionIter.isCurrBucket = isCurr; + evictionIter.bucketListLevel = 1; updateNetworkCfg(); - closeLedger(*app); - ++ledgerSeq; - - // Iter should have advanced to snap and read first entry only - REQUIRE(evictionIter.bucketFileOffset == metadataSize); - REQUIRE(evictionIter.bucketListLevel == 2); - REQUIRE(evictionIter.isCurrBucket == false); - } - - SECTION("iterator resets when bucket changes") - { - auto testIterReset = [&](bool isCurr) { - auto const levelToTest = 1; - auto bucket = [&]() { - return isCurr ? 
bl.getLevel(levelToTest).getCurr() - : bl.getLevel(levelToTest).getSnap(); - }; - - // Iterate until entries spill into level 1 bucket - for (; bucket()->getSize() < 1'000; ++ledgerSeq) - { - lm.setNextLedgerEntryBatchForBucketTesting( - {}, - LedgerTestUtils:: - generateValidLedgerEntriesWithExclusions( - {CONFIG_SETTING, CONTRACT_DATA, - CONTRACT_CODE}, - 10), - {}); - closeLedger(*app); - } - - // Scan meta entry + one other entry in initial scan - stateArchivalSettings.evictionScanSize = metadataSize + 1; - - // Reset eviction iter start of bucket being tested - stateArchivalSettings.startingEvictionScanLevel = - levelToTest; - evictionIter.bucketFileOffset = 0; - evictionIter.isCurrBucket = isCurr; - evictionIter.bucketListLevel = 1; - updateNetworkCfg(); - - // Advance until one ledger before bucket is updated - auto ledgersUntilUpdate = - BucketList::bucketUpdatePeriod(levelToTest, isCurr) - - 1; // updateNetworkCfg closes a ledger that we need to - // count - for (uint32_t i = 0; i < ledgersUntilUpdate - 1; ++i) - { - auto startingIter = evictionIter; - closeLedger(*app); - ++ledgerSeq; - - // Check that iterator is making progress correctly - REQUIRE(evictionIter.bucketFileOffset > - startingIter.bucketFileOffset); - REQUIRE(evictionIter.bucketListLevel == levelToTest); - REQUIRE(evictionIter.isCurrBucket == isCurr); - } - - // Next ledger close should update bucket - auto startingHash = bucket()->getHash(); + // Advance until one ledger before bucket is updated + auto ledgersUntilUpdate = + LiveBucketList::bucketUpdatePeriod(levelToTest, + isCurr) - + 1; // updateNetworkCfg closes a ledger that we need to + // count + for (uint32_t i = 0; i < ledgersUntilUpdate - 1; ++i) + { + auto startingIter = evictionIter; closeLedger(*app); ++ledgerSeq; - // Check that bucket actually changed - REQUIRE(bucket()->getHash() != startingHash); + // Check that iterator is making progress correctly + REQUIRE(evictionIter.bucketFileOffset > + startingIter.bucketFileOffset); + REQUIRE(evictionIter.bucketListLevel == levelToTest); + REQUIRE(evictionIter.isCurrBucket == isCurr); + } - // The iterator retroactively checks if the Bucket has - // changed, so close one additional ledger to check if the - // iterator has reset - closeLedger(*app); - ++ledgerSeq; + // Next ledger close should update bucket + auto startingHash = bucket()->getHash(); + closeLedger(*app); + ++ledgerSeq; - BucketInputIterator in(bucket()); + // Check that bucket actually changed + REQUIRE(bucket()->getHash() != startingHash); - // Check that iterator has reset to beginning of bucket and - // read meta entry + one additional entry - REQUIRE(evictionIter.bucketFileOffset == - metadataSize + xdr::xdr_size(*in) + - xdrOverheadBytes); - REQUIRE(evictionIter.bucketListLevel == levelToTest); - REQUIRE(evictionIter.isCurrBucket == isCurr); - }; + // The iterator retroactively checks if the Bucket has + // changed, so close one additional ledger to check if the + // iterator has reset + closeLedger(*app); + ++ledgerSeq; - SECTION("curr bucket") - { - testIterReset(true); - } + LiveBucketInputIterator in(bucket()); - SECTION("snap bucket") - { - testIterReset(false); - } + // Check that iterator has reset to beginning of bucket and + // read meta entry + one additional entry + REQUIRE(evictionIter.bucketFileOffset == + metadataSize + xdr::xdr_size(*in) + xdrOverheadBytes); + REQUIRE(evictionIter.bucketListLevel == levelToTest); + REQUIRE(evictionIter.isCurrBucket == isCurr); + }; + + SECTION("curr bucket") + { + testIterReset(true); } - 
}); + + SECTION("snap bucket") + { + testIterReset(false); + } + } }; - SECTION("legacy scan") - { - test(/*backgroundScan=*/false); - } - SECTION("background scan") - { - test(/*backgroundScan=*/true); - } + for_versions(20, Config::CURRENT_LEDGER_PROTOCOL_VERSION, cfg, test); } TEST_CASE_VERSIONS("Searchable BucketListDB snapshots", "[bucketlist]") { VirtualClock clock; Config cfg(getTestConfig()); - cfg.DEPRECATED_SQL_LEDGER_STATE = false; auto app = createTestApplication(clock, cfg); LedgerManagerForBucketTests& lm = app->getLedgerManager(); @@ -1276,7 +1476,7 @@ TEST_CASE_VERSIONS("Searchable BucketListDB snapshots", "[bucketlist]") entry.data.claimableBalance().amount = 0; auto searchableBL = - bm.getBucketSnapshotManager().copySearchableBucketListSnapshot(); + bm.getBucketSnapshotManager().copySearchableLiveBucketListSnapshot(); // Update entry every 5 ledgers so we can see bucket merge events for (auto ledgerSeq = 1; ledgerSeq < 101; ++ledgerSeq) @@ -1343,30 +1543,30 @@ formatLedgerList(std::vector const& ledgers) TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") { - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { CLOG_INFO(Bucket, "levelSize({}) = {} (formally)", level, - formatU32(BucketList::levelSize(level))); + formatU32(LiveBucketList::levelSize(level))); } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { CLOG_INFO(Bucket, "levelHalf({}) = {} (formally)", level, - formatU32(BucketList::levelHalf(level))); + formatU32(LiveBucketList::levelHalf(level))); } for (uint32_t probe : {0x100, 0x10000, 0x1000000}) { - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { - auto sz = formatU32(BucketList::sizeOfCurr(probe, level)); + auto sz = formatU32(LiveBucketList::sizeOfCurr(probe, level)); CLOG_INFO(Bucket, "sizeOfCurr({:#x}, {}) = {} (precisely)", probe, level, sz); } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { - auto sz = formatU32(BucketList::sizeOfSnap(probe, level)); + auto sz = formatU32(LiveBucketList::sizeOfSnap(probe, level)); CLOG_INFO(Bucket, "sizeOfSnap({:#x}, {}) = {} (precisely)", probe, level, sz); } @@ -1375,17 +1575,17 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") std::vector> spillEvents; std::vector> nonMergeCommitEvents; std::vector> mergeCommitEvents; - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { spillEvents.push_back({}); nonMergeCommitEvents.push_back({}); mergeCommitEvents.push_back({}); } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { for (uint32_t ledger = 0; ledger < 0x1000000; ++ledger) { - if (BucketList::levelShouldSpill(ledger, level)) + if (LiveBucketList::levelShouldSpill(ledger, level)) { spillEvents[level].push_back(ledger); if (spillEvents[level].size() > 5) @@ -1393,11 +1593,12 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") break; } } - if (level != 0 && BucketList::levelShouldSpill(ledger, level - 1)) + if (level != 0 && + LiveBucketList::levelShouldSpill(ledger, level - 1)) { uint32_t nextChangeLedger = - 
ledger + BucketList::levelHalf(level - 1); - if (BucketList::levelShouldSpill(nextChangeLedger, level)) + ledger + LiveBucketList::levelHalf(level - 1); + if (LiveBucketList::levelShouldSpill(nextChangeLedger, level)) { nonMergeCommitEvents[level].push_back(ledger); } @@ -1408,17 +1609,17 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") } } } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { auto ls = formatLedgerList(spillEvents[level]); CLOG_INFO(Bucket, "levelShouldSpill({:#x}) = true @ {}", level, ls); } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { auto ls = formatLedgerList(mergeCommitEvents[level]); CLOG_INFO(Bucket, "mergeCommit({:#x}) @ {}", level, ls); } - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { auto ls = formatLedgerList(nonMergeCommitEvents[level]); CLOG_INFO(Bucket, "nonMergeCommit({:#x}) @ {}", level, ls); @@ -1427,12 +1628,12 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]") // Print out the full bucketlist at an arbitrarily-chosen probe ledger. uint32_t probe = 0x11f9ab; CLOG_INFO(Bucket, "BucketList state at {:#x}", probe); - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { - uint32_t currOld = BucketList::oldestLedgerInCurr(probe, level); - uint32_t snapOld = BucketList::oldestLedgerInSnap(probe, level); - uint32_t currSz = BucketList::sizeOfCurr(probe, level); - uint32_t snapSz = BucketList::sizeOfSnap(probe, level); + uint32_t currOld = LiveBucketList::oldestLedgerInCurr(probe, level); + uint32_t snapOld = LiveBucketList::oldestLedgerInSnap(probe, level); + uint32_t currSz = LiveBucketList::sizeOfCurr(probe, level); + uint32_t snapSz = LiveBucketList::sizeOfSnap(probe, level); uint32_t currNew = currOld + currSz - 1; uint32_t snapNew = snapOld + snapSz - 1; CLOG_INFO( diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp index fd7653205b..c2f51c35b6 100644 --- a/src/bucket/test/BucketManagerTests.cpp +++ b/src/bucket/test/BucketManagerTests.cpp @@ -40,11 +40,11 @@ namespace BucketManagerTests { static void -clearFutures(Application::pointer app, BucketList& bl) +clearFutures(Application::pointer app, LiveBucketList& bl) { // First go through the BL and mop up all the FutureBuckets. - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { bl.getLevel(i).getNext().clear(); } @@ -54,13 +54,9 @@ clearFutures(Application::pointer app, BucketList& bl) size_t n = static_cast(app->getConfig().WORKER_THREADS); - // If background eviction is enabled, we have one fewer worker thread for - // bucket merges - if (app->getConfig().isUsingBackgroundEviction()) - { - releaseAssert(n != 0); - --n; - } + // Background eviction takes up one worker thread. 
+ releaseAssert(n != 0); + --n; std::mutex mutex; std::condition_variable cv, cv2; @@ -193,113 +189,90 @@ TEST_CASE("skip list", "[bucket][bucketmanager]") TEST_CASE_VERSIONS("bucketmanager ownership", "[bucket][bucketmanager]") { - auto test = [&](bool bucketListDB) { - VirtualClock clock; - Config cfg = getTestConfig(); - - // Make sure all Buckets serialize indexes to disk for test - cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0; - cfg.MANUAL_CLOSE = false; - - if (bucketListDB) - { - // Enable BucketListDB with persistent indexes - cfg.DEPRECATED_SQL_LEDGER_STATE = false; - cfg.NODE_IS_VALIDATOR = false; - cfg.FORCE_SCP = false; - } - - for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { - Application::pointer app = createTestApplication(clock, cfg); - - std::vector live( - LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( - {CONFIG_SETTING}, 10)); - std::vector dead{}; - - std::shared_ptr b1; - - { - std::shared_ptr b2 = Bucket::fresh( - app->getBucketManager(), getAppLedgerVersion(app), {}, live, - dead, /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - b1 = b2; - - // Bucket is referenced by b1, b2 and the BucketManager. - CHECK(b1.use_count() == 3); - - std::shared_ptr b3 = Bucket::fresh( - app->getBucketManager(), getAppLedgerVersion(app), {}, live, - dead, /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - std::shared_ptr b4 = Bucket::fresh( - app->getBucketManager(), getAppLedgerVersion(app), {}, live, - dead, /*countMergeEvents=*/true, clock.getIOContext(), - /*doFsync=*/true); - // Bucket is referenced by b1, b2, b3, b4 and the BucketManager. - CHECK(b1.use_count() == 5); - } - - // Take pointer by reference to not mess up use_count() - auto dropBucket = [&](std::shared_ptr& b) { - std::string filename = b->getFilename().string(); - std::string indexFilename = - app->getBucketManager().bucketIndexFilename(b->getHash()); - CHECK(fs::exists(filename)); - if (bucketListDB) - { - CHECK(fs::exists(indexFilename)); - } + VirtualClock clock; + Config cfg = getTestConfig(); - b.reset(); - app->getBucketManager().forgetUnreferencedBuckets(); - CHECK(!fs::exists(filename)); - CHECK(!fs::exists(indexFilename)); - }; + // Make sure all Buckets serialize indexes to disk for test + cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0; + cfg.MANUAL_CLOSE = false; - // Bucket is now only referenced by b1 and the BucketManager. - CHECK(b1.use_count() == 2); + for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) { + Application::pointer app = createTestApplication(clock, cfg); - // Drop bucket ourselves then purge bucketManager. - dropBucket(b1); + std::vector live( + LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( + {CONFIG_SETTING}, 10)); + std::vector dead{}; - // Try adding a bucket to the BucketManager's bucketlist - auto& bl = app->getBucketManager().getBucketList(); - bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead); - clearFutures(app, bl); - b1 = bl.getLevel(0).getCurr(); + std::shared_ptr b1; - // Bucket should be referenced by bucketlist itself, BucketManager - // cache and b1. - CHECK(b1.use_count() == 3); + { + std::shared_ptr b2 = LiveBucket::fresh( + app->getBucketManager(), getAppLedgerVersion(app), {}, live, + dead, /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + b1 = b2; - // This shouldn't change if we forget unreferenced buckets since - // it's referenced by bucketlist. 
- app->getBucketManager().forgetUnreferencedBuckets(); + // Bucket is referenced by b1, b2 and the BucketManager. CHECK(b1.use_count() == 3); - // But if we mutate the curr bucket of the bucketlist, it should. - live[0] = LedgerTestUtils::generateValidLedgerEntryWithExclusions( - {CONFIG_SETTING}); - bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead); - clearFutures(app, bl); - CHECK(b1.use_count() == 2); - - // Drop it again. - dropBucket(b1); - }); - }; + std::shared_ptr b3 = LiveBucket::fresh( + app->getBucketManager(), getAppLedgerVersion(app), {}, live, + dead, /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + std::shared_ptr b4 = LiveBucket::fresh( + app->getBucketManager(), getAppLedgerVersion(app), {}, live, + dead, /*countMergeEvents=*/true, clock.getIOContext(), + /*doFsync=*/true); + // Bucket is referenced by b1, b2, b3, b4 and the BucketManager. + CHECK(b1.use_count() == 5); + } - SECTION("BucketListDB") - { - test(true); - } + // Take pointer by reference to not mess up use_count() + auto dropBucket = [&](std::shared_ptr& b) { + std::string filename = b->getFilename().string(); + std::string indexFilename = + app->getBucketManager().bucketIndexFilename(b->getHash()); + CHECK(fs::exists(filename)); + CHECK(fs::exists(indexFilename)); - SECTION("SQL") - { - test(false); - } + b.reset(); + app->getBucketManager().forgetUnreferencedBuckets(); + CHECK(!fs::exists(filename)); + CHECK(!fs::exists(indexFilename)); + }; + + // Bucket is now only referenced by b1 and the BucketManager. + CHECK(b1.use_count() == 2); + + // Drop bucket ourselves then purge bucketManager. + dropBucket(b1); + + // Try adding a bucket to the BucketManager's bucketlist + auto& bl = app->getBucketManager().getLiveBucketList(); + bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead); + clearFutures(app, bl); + b1 = bl.getLevel(0).getCurr(); + + // Bucket should be referenced by bucketlist itself, BucketManager + // cache and b1. + CHECK(b1.use_count() == 3); + + // This shouldn't change if we forget unreferenced buckets since + // it's referenced by bucketlist. + app->getBucketManager().forgetUnreferencedBuckets(); + CHECK(b1.use_count() == 3); + + // But if we mutate the curr bucket of the bucketlist, it should. + live[0] = LedgerTestUtils::generateValidLedgerEntryWithExclusions( + {CONFIG_SETTING}); + bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead); + clearFutures(app, bl); + CHECK(b1.use_count() == 2); + + // Drop it again. 
+ dropBucket(b1); + }); } TEST_CASE("bucketmanager missing buckets fail", "[bucket][bucketmanager]") @@ -310,7 +283,7 @@ TEST_CASE("bucketmanager missing buckets fail", "[bucket][bucketmanager]") VirtualClock clock; auto app = createTestApplication(clock, cfg); BucketManager& bm = app->getBucketManager(); - BucketList& bl = bm.getBucketList(); + LiveBucketList& bl = bm.getLiveBucketList(); LedgerManagerForBucketTests& lm = app->getLedgerManager(); uint32_t ledger = 0; @@ -324,7 +297,7 @@ TEST_CASE("bucketmanager missing buckets fail", "[bucket][bucketmanager]") {CONFIG_SETTING}, 10), {}); closeLedger(*app); - } while (!BucketList::levelShouldSpill(ledger, level - 1)); + } while (!LiveBucketList::levelShouldSpill(ledger, level - 1)); auto someBucket = bl.getLevel(1).getCurr(); someBucketFileName = someBucket->getFilename().string(); } @@ -353,7 +326,7 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", Application::pointer app = createTestApplication(clock, cfg); BucketManager& bm = app->getBucketManager(); - BucketList& bl = bm.getBucketList(); + LiveBucketList& bl = bm.getLiveBucketList(); auto vers = getAppLedgerVersion(app); // Add some entries to get to a nontrivial merge-state. @@ -365,13 +338,13 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", auto lh = app->getLedgerManager().getLastClosedLedgerHeader().header; lh.ledgerSeq = ledger; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, + addLiveBatchAndUpdateSnapshot( + *app, lh, {}, LedgerTestUtils::generateValidLedgerEntriesWithExclusions( {CONFIG_SETTING}, 10), {}); bm.forgetUnreferencedBuckets(); - } while (!BucketList::levelShouldSpill(ledger, level - 1)); + } while (!LiveBucketList::levelShouldSpill(ledger, level - 1)); // Check that the merge on level isn't committed (we're in // ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING mode that does not resolve @@ -396,7 +369,7 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge", // Reattach to _finished_ merge future on level. has2.currentBuckets[level].next.makeLive( - *app, vers, BucketList::keepDeadEntries(level)); + *app, vers, LiveBucketList::keepTombstoneEntries(level)); REQUIRE(has2.currentBuckets[level].next.isMerging()); // Resolve reattached future. @@ -420,7 +393,7 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", Application::pointer app = createTestApplication(clock, cfg); BucketManager& bm = app->getBucketManager(); - BucketList& bl = bm.getBucketList(); + LiveBucketList& bl = bm.getLiveBucketList(); auto vers = getAppLedgerVersion(app); // This test is a race that will (if all goes well) eventually be won: @@ -454,8 +427,8 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", auto lh = app->getLedgerManager().getLastClosedLedgerHeader().header; lh.ledgerSeq = ledger; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, + addLiveBatchAndUpdateSnapshot( + *app, lh, {}, LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( {CONFIG_SETTING}, 100), {}); @@ -473,12 +446,14 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge", // win quite shortly). 
HistoryArchiveState has2; has2.fromString(serialHas); - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; + ++level) { if (has2.currentBuckets[level].next.hasHashes()) { has2.currentBuckets[level].next.makeLive( - *app, vers, BucketList::keepDeadEntries(level)); + *app, vers, + LiveBucketList::keepTombstoneEntries(level)); } } } @@ -499,17 +474,17 @@ TEST_CASE("bucketmanager do not leak empty-merge futures", // are thereby not leaking. Disable BucketListDB so that snapshots do not // hold persist buckets, complicating bucket counting. VirtualClock clock; - Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY)); cfg.ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING = true; cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - 1; auto app = createTestApplication(clock, cfg); BucketManager& bm = app->getBucketManager(); - BucketList& bl = bm.getBucketList(); + LiveBucketList& bl = bm.getLiveBucketList(); LedgerManagerForBucketTests& lm = app->getLedgerManager(); // We create 8 live ledger entries spread across 8 ledgers then add a ledger @@ -578,8 +553,6 @@ TEST_CASE_VERSIONS( auto vers = getAppLedgerVersion(app); auto& hm = app->getHistoryManager(); auto& bm = app->getBucketManager(); - auto& bl = bm.getBucketList(); - auto& lm = app->getLedgerManager(); hm.setPublicationEnabled(false); app->getHistoryArchiveManager().initializeHistoryArchive( tcfg.getArchiveDirName()); @@ -595,8 +568,8 @@ TEST_CASE_VERSIONS( auto lh = app->getLedgerManager().getLastClosedLedgerHeader().header; lh.ledgerSeq++; - addBatchAndUpdateSnapshot( - bl, *app, lh, {}, + addLiveBatchAndUpdateSnapshot( + *app, lh, {}, LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions( {CONFIG_SETTING}, 100), {}); @@ -619,7 +592,7 @@ TEST_CASE_VERSIONS( auto ra = bm.readMergeCounters().mFinishedMergeReattachments; if (protocolVersionIsBefore(vers, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { // Versions prior to FIRST_PROTOCOL_SHADOWS_REMOVED re-attach to // finished merges @@ -682,9 +655,9 @@ TEST_CASE_VERSIONS( class StopAndRestartBucketMergesTest { static void - resolveAllMerges(BucketList& bl) + resolveAllMerges(LiveBucketList& bl) { - for (uint32 i = 0; i < BucketList::kNumLevels; ++i) + for (uint32 i = 0; i < LiveBucketList::kNumLevels; ++i) { auto& level = bl.getLevel(i); auto& next = level.getNext(); @@ -770,8 +743,8 @@ class StopAndRestartBucketMergesTest checkSensiblePostInitEntryMergeCounters(uint32_t protocol) const { CHECK(mMergeCounters.mPostInitEntryProtocolMerges != 0); - if (protocolVersionIsBefore(protocol, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + if (protocolVersionIsBefore( + protocol, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { CHECK(mMergeCounters.mPostShadowRemovalProtocolMerges == 0); } @@ -797,8 +770,8 @@ class StopAndRestartBucketMergesTest CHECK(mMergeCounters.mOldInitEntriesMergedWithNewDead != 0); CHECK(mMergeCounters.mNewEntriesMergedWithOldNeitherInit != 0); - if (protocolVersionIsBefore(protocol, - Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) + if (protocolVersionIsBefore( + protocol, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)) { CHECK(mMergeCounters.mShadowScanSteps != 0); CHECK(mMergeCounters.mLiveEntryShadowElisions != 0); @@ -933,14 +906,14 @@ class 
StopAndRestartBucketMergesTest { LedgerManager& lm = app.getLedgerManager(); BucketManager& bm = app.getBucketManager(); - BucketList& bl = bm.getBucketList(); + LiveBucketList& bl = bm.getLiveBucketList(); // Complete those merges we're about to inspect. resolveAllMerges(bl); mMergeCounters = bm.readMergeCounters(); mLedgerHeaderHash = lm.getLastClosedLedgerHeader().hash; mBucketListHash = bl.getHash(); - BucketLevel& blv = bl.getLevel(level); + BucketLevel& blv = bl.getLevel(level); mCurrBucketHash = blv.getCurr()->getHash(); mSnapBucketHash = blv.getSnap()->getHash(); } @@ -959,13 +932,13 @@ class StopAndRestartBucketMergesTest collectLedgerEntries(Application& app, std::map& entries) { - auto bl = app.getBucketManager().getBucketList(); - for (uint32_t i = BucketList::kNumLevels; i > 0; --i) + auto bl = app.getBucketManager().getLiveBucketList(); + for (uint32_t i = LiveBucketList::kNumLevels; i > 0; --i) { - BucketLevel const& level = bl.getLevel(i - 1); + BucketLevel const& level = bl.getLevel(i - 1); for (auto bucket : {level.getSnap(), level.getCurr()}) { - for (BucketInputIterator bi(bucket); bi; ++bi) + for (LiveBucketInputIterator bi(bucket); bi; ++bi) { BucketEntry const& e = *bi; if (e.type() == LIVEENTRY || e.type() == INITENTRY) @@ -1008,10 +981,11 @@ class StopAndRestartBucketMergesTest void calculateDesignatedLedgers() { - uint32_t spillFreq = BucketList::levelHalf(mDesignatedLevel); - uint32_t prepFreq = (mDesignatedLevel == 0 - ? 1 - : BucketList::levelHalf(mDesignatedLevel - 1)); + uint32_t spillFreq = LiveBucketList::levelHalf(mDesignatedLevel); + uint32_t prepFreq = + (mDesignatedLevel == 0 + ? 1 + : LiveBucketList::levelHalf(mDesignatedLevel - 1)); uint32_t const SPILLCOUNT = 5; uint32_t const PREPCOUNT = 5; @@ -1215,7 +1189,7 @@ class StopAndRestartBucketMergesTest lm.setNextLedgerEntryBatchForBucketTesting( mInitEntryBatches[i - 2], mLiveEntryBatches[i - 2], mDeadEntryBatches[i - 2]); - resolveAllMerges(app->getBucketManager().getBucketList()); + resolveAllMerges(app->getBucketManager().getLiveBucketList()); auto countersBeforeClose = app->getBucketManager().readMergeCounters(); @@ -1243,13 +1217,15 @@ class StopAndRestartBucketMergesTest auto j = mControlSurveys.find(i); if (j != mControlSurveys.end()) { - if (BucketList::levelShouldSpill(i, mDesignatedLevel - 1)) + if (LiveBucketList::levelShouldSpill(i, mDesignatedLevel - 1)) { // Confirm that there's a merge-in-progress at this level // (closing ledger i should have provoked a spill from // mDesignatedLevel-1 to mDesignatedLevel) - BucketList& bl = app->getBucketManager().getBucketList(); - BucketLevel& blv = bl.getLevel(mDesignatedLevel); + LiveBucketList& bl = + app->getBucketManager().getLiveBucketList(); + BucketLevel& blv = + bl.getLevel(mDesignatedLevel); REQUIRE(blv.getNext().isMerging()); } @@ -1277,11 +1253,13 @@ class StopAndRestartBucketMergesTest clock = std::make_unique(); app = createTestApplication(*clock, cfg, false); - if (BucketList::levelShouldSpill(i, mDesignatedLevel - 1)) + if (LiveBucketList::levelShouldSpill(i, mDesignatedLevel - 1)) { // Confirm that the merge-in-progress was restarted. 
- BucketList& bl = app->getBucketManager().getBucketList(); - BucketLevel& blv = bl.getLevel(mDesignatedLevel); + LiveBucketList& bl = + app->getBucketManager().getLiveBucketList(); + BucketLevel& blv = + bl.getLevel(mDesignatedLevel); REQUIRE(blv.getNext().isMerging()); } @@ -1315,7 +1293,7 @@ class StopAndRestartBucketMergesTest assert(!mControlSurveys.empty()); if (protocolVersionStartsFrom( mProtocol, - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)) { mControlSurveys.rbegin()->second.dumpMergeCounters( "control, Post-INITENTRY", mDesignatedLevel); @@ -1339,11 +1317,11 @@ TEST_CASE("bucket persistence over app restart with initentry", { for (uint32_t protocol : {static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - 1, static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), - static_cast(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) { for (uint32_t level : {2, 3}) { @@ -1359,11 +1337,11 @@ TEST_CASE("bucket persistence over app restart with initentry - extended", { for (uint32_t protocol : {static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - 1, static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), - static_cast(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}) { for (uint32_t level : {2, 3, 4, 5}) { @@ -1418,7 +1396,7 @@ TEST_CASE_VERSIONS("bucket persistence over app restart", VirtualClock clock; Application::pointer app = createTestApplication(clock, cfg0); sk = std::make_optional(cfg0.NODE_SEED); - BucketList& bl = app->getBucketManager().getBucketList(); + LiveBucketList& bl = app->getBucketManager().getLiveBucketList(); uint32_t i = 2; while (i < pause) @@ -1453,7 +1431,7 @@ TEST_CASE_VERSIONS("bucket persistence over app restart", { VirtualClock clock; Application::pointer app = createTestApplication(clock, cfg1); - BucketList& bl = app->getBucketManager().getBucketList(); + LiveBucketList& bl = app->getBucketManager().getLiveBucketList(); uint32_t i = 2; while (i < pause) @@ -1480,7 +1458,7 @@ TEST_CASE_VERSIONS("bucket persistence over app restart", VirtualClock clock; Application::pointer app = Application::create(clock, cfg1, false); app->start(); - BucketList& bl = app->getBucketManager().getBucketList(); + LiveBucketList& bl = app->getBucketManager().getLiveBucketList(); // Confirm that we re-acquired the close-ledger state. 
REQUIRE(
diff --git a/src/bucket/test/BucketMergeMapTests.cpp b/src/bucket/test/BucketMergeMapTests.cpp
index b5f9ea81a8..c5883e1c82 100644
--- a/src/bucket/test/BucketMergeMapTests.cpp
+++ b/src/bucket/test/BucketMergeMapTests.cpp
@@ -22,7 +22,7 @@ TEST_CASE("bucket merge map", "[bucket][bucketmergemap]")
         std::vector<LedgerEntry> live =
             LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
                 {CONFIG_SETTING}, numEntries);
-        std::shared_ptr<Bucket> b1 = Bucket::fresh(
+        std::shared_ptr<LiveBucket> b1 = LiveBucket::fresh(
             app->getBucketManager(), BucketTestUtils::getAppLedgerVersion(app),
             {}, live, {},
             /*countMergeEvents=*/true, clock.getIOContext(),
@@ -30,41 +30,44 @@ TEST_CASE("bucket merge map", "[bucket][bucketmergemap]")
         return b1;
     };
 
-    std::shared_ptr<Bucket> in1a = getValidBucket();
-    std::shared_ptr<Bucket> in1b = getValidBucket();
-    std::shared_ptr<Bucket> in1c = getValidBucket();
+    std::shared_ptr<LiveBucket> in1a = getValidBucket();
+    std::shared_ptr<LiveBucket> in1b = getValidBucket();
+    std::shared_ptr<LiveBucket> in1c = getValidBucket();
 
-    std::shared_ptr<Bucket> in2a = getValidBucket();
-    std::shared_ptr<Bucket> in2b = getValidBucket();
-    std::shared_ptr<Bucket> in2c = getValidBucket();
+    std::shared_ptr<LiveBucket> in2a = getValidBucket();
+    std::shared_ptr<LiveBucket> in2b = getValidBucket();
+    std::shared_ptr<LiveBucket> in2c = getValidBucket();
 
-    std::shared_ptr<Bucket> in3a = getValidBucket();
-    std::shared_ptr<Bucket> in3b = getValidBucket();
-    std::shared_ptr<Bucket> in3c = getValidBucket();
-    std::shared_ptr<Bucket> in3d = getValidBucket();
+    std::shared_ptr<LiveBucket> in3a = getValidBucket();
+    std::shared_ptr<LiveBucket> in3b = getValidBucket();
+    std::shared_ptr<LiveBucket> in3c = getValidBucket();
+    std::shared_ptr<LiveBucket> in3d = getValidBucket();
 
-    std::shared_ptr<Bucket> in4a = getValidBucket();
-    std::shared_ptr<Bucket> in4b = getValidBucket();
+    std::shared_ptr<LiveBucket> in4a = getValidBucket();
+    std::shared_ptr<LiveBucket> in4b = getValidBucket();
 
-    std::shared_ptr<Bucket> in5a = getValidBucket();
-    std::shared_ptr<Bucket> in5b = getValidBucket();
+    std::shared_ptr<LiveBucket> in5a = getValidBucket();
+    std::shared_ptr<LiveBucket> in5b = getValidBucket();
 
-    std::shared_ptr<Bucket> in6a = getValidBucket();
-    std::shared_ptr<Bucket> in6b = getValidBucket();
+    std::shared_ptr<LiveBucket> in6a = getValidBucket();
+    std::shared_ptr<LiveBucket> in6b = getValidBucket();
 
-    std::shared_ptr<Bucket> out1 = getValidBucket();
-    std::shared_ptr<Bucket> out2 = getValidBucket();
-    std::shared_ptr<Bucket> out4 = getValidBucket();
-    std::shared_ptr<Bucket> out6 = getValidBucket();
+    std::shared_ptr<LiveBucket> out1 = getValidBucket();
+    std::shared_ptr<LiveBucket> out2 = getValidBucket();
+    std::shared_ptr<LiveBucket> out4 = getValidBucket();
+    std::shared_ptr<LiveBucket> out6 = getValidBucket();
 
     BucketMergeMap bmm;
 
-    MergeKey m1{true, in1a, in1b, {in1c}};
-    MergeKey m2{true, in2a, in2b, {in2c}};
-    MergeKey m3{true, in3a, in3b, {in3c, in3d}};
-    MergeKey m4{true, in4a, in4b, {}};
-    MergeKey m5{true, in5a, in5b, {}};
-    MergeKey m6{true, in6a, in6b, {in1a}};
+    MergeKey m1{true, in1a->getHash(), in1b->getHash(), {in1c->getHash()}};
+    MergeKey m2{true, in2a->getHash(), in2b->getHash(), {in2c->getHash()}};
+    MergeKey m3{true,
+                in3a->getHash(),
+                in3b->getHash(),
+                {in3c->getHash(), in3d->getHash()}};
+    MergeKey m4{true, in4a->getHash(), in4b->getHash(), {}};
+    MergeKey m5{true, in5a->getHash(), in5b->getHash(), {}};
+    MergeKey m6{true, in6a->getHash(), in6b->getHash(), {in1a->getHash()}};
 
     bmm.recordMerge(m1, out1->getHash());
     bmm.recordMerge(m2, out2->getHash());
diff --git a/src/bucket/test/BucketTestUtils.cpp b/src/bucket/test/BucketTestUtils.cpp
index 0200e49442..86ac292c10 100644
--- a/src/bucket/test/BucketTestUtils.cpp
+++ b/src/bucket/test/BucketTestUtils.cpp
@@ -3,6 +3,7 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "BucketTestUtils.h" 
+#include "bucket/Bucket.h" #include "bucket/BucketInputIterator.h" #include "bucket/BucketManager.h" #include "crypto/Hex.h" @@ -10,6 +11,9 @@ #include "ledger/LedgerTxn.h" #include "main/Application.h" #include "test/test.h" +#include "util/ProtocolVersion.h" +#include "xdr/Stellar-ledger.h" +#include namespace stellar { @@ -30,18 +34,43 @@ getAppLedgerVersion(Application::pointer app) } void -addBatchAndUpdateSnapshot(BucketList& bl, Application& app, LedgerHeader header, - std::vector const& initEntries, - std::vector const& liveEntries, - std::vector const& deadEntries) +addLiveBatchAndUpdateSnapshot(Application& app, LedgerHeader header, + std::vector const& initEntries, + std::vector const& liveEntries, + std::vector const& deadEntries) { - bl.addBatch(app, header.ledgerSeq, header.ledgerVersion, initEntries, - liveEntries, deadEntries); - if (app.getConfig().isUsingBucketListDB()) - { - app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot( - std::make_unique(bl, header)); - } + auto& liveBl = app.getBucketManager().getLiveBucketList(); + liveBl.addBatch(app, header.ledgerSeq, header.ledgerVersion, initEntries, + liveEntries, deadEntries); + + auto liveSnapshot = + std::make_unique>(liveBl, header); + auto hotArchiveSnapshot = + std::make_unique>( + app.getBucketManager().getHotArchiveBucketList(), header); + + app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot( + std::move(liveSnapshot), std::move(hotArchiveSnapshot)); +} + +void +addHotArchiveBatchAndUpdateSnapshot( + Application& app, LedgerHeader header, + std::vector const& archiveEntries, + std::vector const& restoredEntries, + std::vector const& deletedEntries) +{ + auto& hotArchiveBl = app.getBucketManager().getHotArchiveBucketList(); + hotArchiveBl.addBatch(app, header.ledgerSeq, header.ledgerVersion, + archiveEntries, restoredEntries, deletedEntries); + auto liveSnapshot = std::make_unique>( + app.getBucketManager().getLiveBucketList(), header); + auto hotArchiveSnapshot = + std::make_unique>(hotArchiveBl, + header); + + app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot( + std::move(liveSnapshot), std::move(hotArchiveSnapshot)); } void @@ -50,21 +79,14 @@ for_versions_with_differing_bucket_logic( { for_versions( {static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) - 1, static_cast( - Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), - static_cast(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}, + LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY), + static_cast(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)}, cfg, f); } -size_t -countEntries(std::shared_ptr bucket) -{ - EntryCounts e(bucket); - return e.sum(); -} - Hash closeLedger(Application& app, std::optional skToSignValue, xdr::xvector upgrades) @@ -74,7 +96,7 @@ closeLedger(Application& app, std::optional skToSignValue, uint32_t ledgerNum = lcl.header.ledgerSeq + 1; CLOG_INFO(Bucket, "Artificially closing ledger {} with lcl={}, buckets={}", ledgerNum, hexAbbrev(lcl.hash), - hexAbbrev(app.getBucketManager().getBucketList().getHash())); + hexAbbrev(app.getBucketManager().getLiveBucketList().getHash())); app.getHerder().externalizeValue(TxSetXDRFrame::makeEmpty(lcl), ledgerNum, lcl.header.scpValue.closeTime, upgrades, skToSignValue); @@ -87,9 +109,10 @@ closeLedger(Application& app) return closeLedger(app, std::nullopt); } -EntryCounts::EntryCounts(std::shared_ptr bucket) +template <> 

void
@@ -50,21 +79,14 @@ for_versions_with_differing_bucket_logic(
{
    for_versions(
        {static_cast<uint32_t>(
-             Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
+             LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
             1,
         static_cast<uint32_t>(
-             Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY),
-         static_cast<uint32_t>(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)},
+             LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY),
+         static_cast<uint32_t>(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)},
        cfg, f);
}

-size_t
-countEntries(std::shared_ptr<Bucket> bucket)
-{
-    EntryCounts e(bucket);
-    return e.sum();
-}
-
Hash
closeLedger(Application& app, std::optional<SecretKey> skToSignValue,
            xdr::xvector<UpgradeType, 6> upgrades)
@@ -74,7 +96,7 @@ closeLedger(Application& app, std::optional<SecretKey> skToSignValue,
    uint32_t ledgerNum = lcl.header.ledgerSeq + 1;
    CLOG_INFO(Bucket, "Artificially closing ledger {} with lcl={}, buckets={}",
              ledgerNum, hexAbbrev(lcl.hash),
-              hexAbbrev(app.getBucketManager().getBucketList().getHash()));
+              hexAbbrev(app.getBucketManager().getLiveBucketList().getHash()));
    app.getHerder().externalizeValue(TxSetXDRFrame::makeEmpty(lcl), ledgerNum,
                                     lcl.header.scpValue.closeTime, upgrades,
                                     skToSignValue);
@@ -87,9 +109,10 @@ closeLedger(Application& app)
    return closeLedger(app, std::nullopt);
}

-EntryCounts::EntryCounts(std::shared_ptr<Bucket> bucket)
+template <>
+EntryCounts<LiveBucket>::EntryCounts(std::shared_ptr<LiveBucket> bucket)
{
-    BucketInputIterator iter(bucket);
+    LiveBucketInputIterator iter(bucket);
    if (iter.seenMetadata())
    {
        ++nMeta;
@@ -99,7 +122,7 @@ EntryCounts::EntryCounts(std::shared_ptr<Bucket> bucket)
        switch ((*iter).type())
        {
        case INITENTRY:
-            ++nInit;
+            ++nInitOrArchived;
            break;
        case LIVEENTRY:
            ++nLive;
@@ -116,6 +139,48 @@ EntryCounts::EntryCounts(std::shared_ptr<Bucket> bucket)
    }
}

+template <>
+EntryCounts<HotArchiveBucket>::EntryCounts(
+    std::shared_ptr<HotArchiveBucket> bucket)
+{
+    HotArchiveBucketInputIterator iter(bucket);
+    if (iter.seenMetadata())
+    {
+        ++nMeta;
+    }
+    while (iter)
+    {
+        switch ((*iter).type())
+        {
+        case HOT_ARCHIVE_ARCHIVED:
+            ++nInitOrArchived;
+            break;
+        case HOT_ARCHIVE_LIVE:
+            ++nLive;
+            break;
+        case HOT_ARCHIVE_DELETED:
+            ++nDead;
+            break;
+        case HOT_ARCHIVE_METAENTRY:
+            // This should never happen: only the first record can be METAENTRY
+            // and it is counted above.
+            abort();
+        }
+        ++iter;
+    }
+}
+
+template <class BucketT>
+size_t
+countEntries(std::shared_ptr<BucketT> bucket)
+{
+    EntryCounts<BucketT> e(bucket);
+    return e.sum();
+}
+
+template size_t countEntries(std::shared_ptr<LiveBucket> bucket);
+template size_t countEntries(std::shared_ptr<HotArchiveBucket> bucket);
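Because the `EntryCounts` constructor takes `std::shared_ptr<BucketT>`, class template argument deduction keeps existing call sites such as `EntryCounts e(b1);` source-compatible, which is why the unchanged context lines elsewhere in this patch still compile. A small sketch, with `liveBucket` and `hotBucket` assumed to be in scope:

```cpp
EntryCounts<LiveBucket> liveCounts(liveBucket); // explicit template argument
EntryCounts hotCounts(hotBucket);               // deduced: EntryCounts<HotArchiveBucket>
size_t n = countEntries(liveBucket);            // uses the explicit instantiation above
```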

void
LedgerManagerForBucketTests::transferLedgerEntriesToBucketList(
    AbstractLedgerTxn& ltx,
@@ -163,22 +228,23 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList(
            }

            LedgerTxn ltxEvictions(ltx);
-            if (mApp.getConfig().isUsingBackgroundEviction())
-            {
+
+            auto evictedEntries =
                mApp.getBucketManager().resolveBackgroundEvictionScan(
-                    ltxEvictions, lh.ledgerSeq, keys);
-            }
-            else
+                    ltxEvictions, lh.ledgerSeq, keys, initialLedgerVers);
+
+            if (protocolVersionStartsFrom(
+                    initialLedgerVers,
+                    Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
            {
-                mApp.getBucketManager().scanForEvictionLegacy(ltxEvictions,
-                                                              lh.ledgerSeq);
+                mApp.getBucketManager().addHotArchiveBatch(
+                    mApp, lh, evictedEntries.second, {}, {});
            }
-
            if (ledgerCloseMeta)
            {
-                ledgerCloseMeta->populateEvictedEntries(
-                    ltxEvictions.getChanges());
+                ledgerCloseMeta->populateEvictedEntries(evictedEntries);
            }
+
            ltxEvictions.commit();
        }

        mApp.getLedgerManager()
@@ -191,8 +257,7 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList(
        // Add dead entries from ltx to entries that will be added to BucketList
        // so we can test background eviction properly
        if (protocolVersionStartsFrom(initialLedgerVers,
-                                      SOROBAN_PROTOCOL_VERSION) &&
-            mApp.getConfig().isUsingBackgroundEviction())
+                                      SOROBAN_PROTOCOL_VERSION))
        {
            for (auto const& k : dead)
            {
@@ -201,8 +266,8 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList(
        }

        // Use the testing values.
-        mApp.getBucketManager().addBatch(mApp, lh, mTestInitEntries,
-                                         mTestLiveEntries, mTestDeadEntries);
+        mApp.getBucketManager().addLiveBatch(
+            mApp, lh, mTestInitEntries, mTestLiveEntries, mTestDeadEntries);
        mUseTestEntries = false;
    }
    else
diff --git a/src/bucket/test/BucketTestUtils.h b/src/bucket/test/BucketTestUtils.h
index eddd4ae95b..c77794a80a 100644
--- a/src/bucket/test/BucketTestUtils.h
+++ b/src/bucket/test/BucketTestUtils.h
@@ -5,17 +5,23 @@
// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0

#include "test/TestUtils.h"
+#include "xdr/Stellar-ledger.h"

namespace stellar
{
namespace BucketTestUtils
{

-void addBatchAndUpdateSnapshot(BucketList& bl, Application& app,
-                               LedgerHeader header,
-                               std::vector<LedgerEntry> const& initEntries,
-                               std::vector<LedgerEntry> const& liveEntries,
-                               std::vector<LedgerKey> const& deadEntries);
+void addLiveBatchAndUpdateSnapshot(Application& app, LedgerHeader header,
+                                   std::vector<LedgerEntry> const& initEntries,
+                                   std::vector<LedgerEntry> const& liveEntries,
+                                   std::vector<LedgerKey> const& deadEntries);
+
+void addHotArchiveBatchAndUpdateSnapshot(
+    Application& app, LedgerHeader header,
+    std::vector<LedgerEntry> const& archiveEntries,
+    std::vector<LedgerKey> const& restoredEntries,
+    std::vector<LedgerKey> const& deletedEntries);

uint32_t getAppLedgerVersion(Application& app);

@@ -24,27 +30,30 @@ uint32_t getAppLedgerVersion(std::shared_ptr<Application> app);
void for_versions_with_differing_bucket_logic(
    Config const& cfg, std::function<void(Config const&)> const& f);

-struct EntryCounts
+template <class BucketT> struct EntryCounts
{
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
    size_t nMeta{0};
-    size_t nInit{0};
+    size_t nInitOrArchived{0};
    size_t nLive{0};
    size_t nDead{0};
    size_t
    sum() const
    {
-        return nLive + nInit + nDead;
+        return nLive + nInitOrArchived + nDead;
    }
    size_t
    sumIncludingMeta() const
    {
-        return nLive + nInit + nDead + nMeta;
+        return nLive + nInitOrArchived + nDead + nMeta;
    }
-    EntryCounts(std::shared_ptr<Bucket> bucket);
+    EntryCounts(std::shared_ptr<BucketT> bucket);
};

-size_t countEntries(std::shared_ptr<Bucket> bucket);
+template <class BucketT> size_t countEntries(std::shared_ptr<BucketT> bucket);

Hash closeLedger(Application& app, std::optional<SecretKey> skToSignValue,
                 xdr::xvector<UpgradeType, 6> upgrades = emptyUpgradeSteps);
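In the `transferLedgerEntriesToBucketList` hunk above, the legacy `scanForEvictionLegacy` path is gone: the background scan always runs, and from the first protocol that supports persistent eviction its results are also folded into the hot archive. A condensed restatement of that flow, assuming the surrounding `app`, `bm`, `ltx`, `lh`, `keys`, and `initialLedgerVers`, and assuming (per the hunk) that the scan result's `.second` carries the archived entries:

```cpp
auto evicted = bm.resolveBackgroundEvictionScan(ltx, lh.ledgerSeq, keys,
                                                initialLedgerVers);
if (protocolVersionStartsFrom(
        initialLedgerVers,
        Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
{
    // Evicted persistent entries are recorded in the hot archive rather
    // than vanishing outright.
    bm.addHotArchiveBatch(app, lh, evicted.second, {}, {});
}
```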
diff --git a/src/bucket/test/BucketTests.cpp b/src/bucket/test/BucketTests.cpp
index 4da46e26a7..9ec214d1c3 100644
--- a/src/bucket/test/BucketTests.cpp
+++ b/src/bucket/test/BucketTests.cpp
@@ -48,10 +48,10 @@ for_versions_with_differing_initentry_logic(
{
    for_versions(
        {static_cast<uint32_t>(
-             Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
+             LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
             1,
         static_cast<uint32_t>(
-             Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)},
+             LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)},
        cfg, f);
}

@@ -67,7 +67,7 @@ TEST_CASE_VERSIONS("file backed buckets", "[bucket][bucketbench]")
        auto dead = LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions(
            {CONFIG_SETTING}, 1000);
        CLOG_DEBUG(Bucket, "Hashing entries");
-        std::shared_ptr<Bucket> b1 = Bucket::fresh(
+        std::shared_ptr<LiveBucket> b1 = LiveBucket::fresh(
            app->getBucketManager(), getAppLedgerVersion(app), {}, live, dead,
            /*countMergeEvents=*/true, clock.getIOContext(),
            /*doFsync=*/true);
@@ -83,13 +83,13 @@ TEST_CASE_VERSIONS("file backed buckets", "[bucket][bucketbench]")
            b1 = Bucket::merge(
                app->getBucketManager(),
                app->getConfig().LEDGER_PROTOCOL_VERSION, b1,
-                Bucket::fresh(app->getBucketManager(),
-                              getAppLedgerVersion(app), {}, live, dead,
-                              /*countMergeEvents=*/true,
-                              clock.getIOContext(),
-                              /*doFsync=*/true),
+                LiveBucket::fresh(app->getBucketManager(),
+                                  getAppLedgerVersion(app), {}, live, dead,
+                                  /*countMergeEvents=*/true,
+                                  clock.getIOContext(),
+                                  /*doFsync=*/true),
                /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                /*countMergeEvents=*/true, clock.getIOContext(),
                /*doFsync=*/true);
        }
@@ -161,16 +161,16 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]")
                abort();
            }
            auto deadEntry = LedgerEntryKey(liveEntry);
-            auto bLive = Bucket::fresh(bm, vers, {}, {liveEntry}, {},
-                                       /*countMergeEvents=*/true,
-                                       clock.getIOContext(),
-                                       /*doFsync=*/true);
-            auto bDead = Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                                       /*countMergeEvents=*/true,
-                                       clock.getIOContext(),
-                                       /*doFsync=*/true);
+            auto bLive = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
+            auto bDead = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
            auto b1 = Bucket::merge(bm, vers, bLive, bDead, /*shadows=*/{},
-                                    /*keepDeadEntries=*/true,
+                                    /*keepTombstoneEntries=*/true,
                                    /*countMergeEvents=*/true,
                                    clock.getIOContext(),
                                    /*doFsync=*/true);
@@ -200,17 +200,17 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]")
                    dead.push_back(LedgerEntryKey(e));
                }
            }
-            auto bLive =
-                Bucket::fresh(bm, vers, {}, live, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto bDead =
-                Bucket::fresh(bm, vers, {}, {}, dead,
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto bLive = LiveBucket::fresh(bm, vers, {}, live, {},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
+            auto bDead = LiveBucket::fresh(bm, vers, {}, {}, dead,
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
            auto b1 = Bucket::merge(bm, vers, bLive, bDead, /*shadows=*/{},
-                                    /*keepDeadEntries=*/true,
+                                    /*keepTombstoneEntries=*/true,
                                    /*countMergeEvents=*/true,
                                    clock.getIOContext(), /*doFsync=*/true);
            EntryCounts e(b1);
@@ -226,7 +226,7 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]")
            LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
                {CONFIG_SETTING}, 100);
        std::vector<LedgerKey> dead;
-        std::shared_ptr<Bucket> b1 = Bucket::fresh(
+        std::shared_ptr<LiveBucket> b1 = LiveBucket::fresh(
            app->getBucketManager(), getAppLedgerVersion(app), {}, live, dead,
            /*countMergeEvents=*/true, clock.getIOContext(),
            /*doFsync=*/true);
@@ -258,14 +258,14 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]")
                ++liveCount;
            }
        }
-        std::shared_ptr<Bucket> b2 = Bucket::fresh(
+        std::shared_ptr<LiveBucket> b2 = LiveBucket::fresh(
            app->getBucketManager(), getAppLedgerVersion(app), {}, live, dead,
            /*countMergeEvents=*/true, clock.getIOContext(),
            /*doFsync=*/true);
-        std::shared_ptr<Bucket> b3 =
+        std::shared_ptr<LiveBucket> b3 =
            Bucket::merge(app->getBucketManager(),
                          app->getConfig().LEDGER_PROTOCOL_VERSION, b1, b2,
-                          /*shadows=*/{}, /*keepDeadEntries=*/true,
+                          /*shadows=*/{}, /*keepTombstoneEntries=*/true,
                          /*countMergeEvents=*/true, clock.getIOContext(),
                          /*doFsync=*/true);
        CHECK(countEntries(b3) == liveCount);
@@ -273,6 +273,99 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]")
    });
}
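The rename from `keepDeadEntries` to `keepTombstoneEntries` reflects what these tests exercise: when the flag is true, merging a live entry with its deletion key leaves a DEADENTRY tombstone that can still annihilate older copies in lower bucket levels. A minimal sketch of that rule, assuming the test fixture's `bm`, `vers`, `clock`, and a hypothetical `someEntry`:

```cpp
auto live = LiveBucket::fresh(bm, vers, {}, {someEntry}, {},
                              /*countMergeEvents=*/true,
                              clock.getIOContext(), /*doFsync=*/true);
auto dead = LiveBucket::fresh(bm, vers, {}, {}, {LedgerEntryKey(someEntry)},
                              /*countMergeEvents=*/true,
                              clock.getIOContext(), /*doFsync=*/true);
// keepTombstoneEntries=true: the DEADENTRY survives the merge instead of
// being dropped along with the live entry it cancels.
auto merged = Bucket::merge(bm, vers, live, dead, /*shadows=*/{},
                            /*keepTombstoneEntries=*/true,
                            /*countMergeEvents=*/true, clock.getIOContext(),
                            /*doFsync=*/true);
```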
+TEST_CASE_VERSIONS("merging hot archive bucket entries", "[bucket][archival]")
+{
+    VirtualClock clock;
+    Config const& cfg = getTestConfig();
+
+    auto app = createTestApplication(clock, cfg);
+    for_versions_from(23, *app, [&] {
+        auto& bm = app->getBucketManager();
+        auto vers = getAppLedgerVersion(app);
+
+        SECTION("new annihilates old")
+        {
+            auto e1 =
+                LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_CODE);
+            auto e2 =
+                LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_CODE);
+            auto e3 =
+                LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_DATA);
+            auto e4 =
+                LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_DATA);
+
+            // Old bucket:
+            // e1 -> ARCHIVED
+            // e2 -> LIVE
+            // e3 -> DELETED
+            // e4 -> DELETED
+            auto b1 = HotArchiveBucket::fresh(
+                bm, vers, {e1}, {LedgerEntryKey(e2)},
+                {LedgerEntryKey(e3), LedgerEntryKey(e4)},
+                /*countMergeEvents=*/true, clock.getIOContext(),
+                /*doFsync=*/true);
+
+            // New bucket:
+            // e1 -> DELETED
+            // e2 -> ARCHIVED
+            // e3 -> LIVE
+            auto b2 = HotArchiveBucket::fresh(
+                bm, vers, {e2}, {LedgerEntryKey(e3)}, {LedgerEntryKey(e1)},
+                /*countMergeEvents=*/true, clock.getIOContext(),
+                /*doFsync=*/true);
+
+            // Expected result:
+            // e1 -> DELETED
+            // e2 -> ARCHIVED
+            // e3 -> LIVE
+            // e4 -> DELETED
+            auto merged =
+                Bucket::merge(bm, vers, b1, b2, /*shadows=*/{},
+                              /*keepTombstoneEntries=*/true,
+                              /*countMergeEvents=*/true, clock.getIOContext(),
+                              /*doFsync=*/true);
+
+            bool seen1 = false;
+            bool seen4 = false;
+            auto count = 0;
+            for (HotArchiveBucketInputIterator iter(merged); iter; ++iter)
+            {
+                ++count;
+                auto const& e = *iter;
+                if (e.type() == HOT_ARCHIVE_ARCHIVED)
+                {
+                    REQUIRE(e.archivedEntry() == e2);
+                }
+                else if (e.type() == HOT_ARCHIVE_LIVE)
+                {
+                    REQUIRE(e.key() == LedgerEntryKey(e3));
+                }
+                else if (e.type() == HOT_ARCHIVE_DELETED)
+                {
+                    if (e.key() == LedgerEntryKey(e1))
+                    {
+                        REQUIRE(!seen1);
+                        seen1 = true;
+                    }
+                    else if (e.key() == LedgerEntryKey(e4))
+                    {
+                        REQUIRE(!seen4);
+                        seen4 = true;
+                    }
+                }
+                else
+                {
+                    FAIL();
+                }
+            }
+
+            REQUIRE(seen1);
+            REQUIRE(seen4);
+            REQUIRE(count == 4);
+        }
+    });
+}
+
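The invariant this test pins down is that hot-archive merges have no partial semantics: for a key present in both inputs, the newer bucket's state wins outright (ARCHIVED over LIVE, DELETED over ARCHIVED, and so on), and keys present in only one input pass through. Stated as a value-level sketch (the enum name is an assumption; the patch only shows the `HOT_ARCHIVE_*` values):

```cpp
// Illustrative only: the resolution rule for a key seen in both buckets.
HotArchiveBucketEntryType
mergedState(HotArchiveBucketEntryType older, HotArchiveBucketEntryType newer)
{
    // e.g. ARCHIVED(old) + DELETED(new) -> DELETED, matching e1 above.
    return newer;
}
```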
static LedgerEntry
generateAccount()
{
@@ -330,7 +423,8 @@ TEST_CASE("merges proceed old-style despite newer shadows",
    Config const& cfg = getTestConfig();
    Application::pointer app = createTestApplication(clock, cfg);
    auto& bm = app->getBucketManager();
-    auto v12 = static_cast<uint32_t>(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED);
+    auto v12 =
+        static_cast<uint32_t>(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED);
    auto v11 = v12 - 1;
    auto v10 = v11 - 1;
@@ -338,31 +432,31 @@ TEST_CASE("merges proceed old-style despite newer shadows",
    LedgerEntry liveEntry = generateAccount();
    LedgerEntry otherLiveA = generateDifferentAccount({liveEntry});

    auto b10first =
-        Bucket::fresh(bm, v10, {}, {liveEntry}, {},
-                      /*countMergeEvents=*/true, clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v10, {}, {liveEntry}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
    auto b10second =
-        Bucket::fresh(bm, v10, {}, {otherLiveA}, {},
-                      /*countMergeEvents=*/true, clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v10, {}, {otherLiveA}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);

    auto b11first =
-        Bucket::fresh(bm, v11, {}, {liveEntry}, {},
-                      /*countMergeEvents=*/true, clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v11, {}, {liveEntry}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
    auto b11second =
-        Bucket::fresh(bm, v11, {}, {otherLiveA}, {},
-                      /*countMergeEvents=*/true, clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v11, {}, {otherLiveA}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);

    auto b12first =
-        Bucket::fresh(bm, v12, {}, {liveEntry}, {}, /*countMergeEvents=*/true,
-                      clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v12, {}, {liveEntry}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
    auto b12second =
-        Bucket::fresh(bm, v12, {}, {otherLiveA}, {},
-                      /*countMergeEvents=*/true, clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v12, {}, {otherLiveA}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);

    SECTION("shadow version 12")
    {
@@ -370,10 +464,10 @@ TEST_CASE("merges proceed old-style despite newer shadows",
        auto bucket =
            Bucket::merge(bm, v12, b11first, b11second,
                          /*shadows=*/{b12first},
-                          /*keepDeadEntries=*/true,
+                          /*keepTombstoneEntries=*/true,
                          /*countMergeEvents=*/true, clock.getIOContext(),
                          /*doFsync=*/true);
-        REQUIRE(Bucket::getBucketVersion(bucket) == v11);
+        REQUIRE(bucket->getBucketVersion() == v11);
    }
    SECTION("shadow versions mixed, pick lower")
    {
@@ -382,16 +476,16 @@ TEST_CASE("merges proceed old-style despite newer shadows",
        auto bucket =
            Bucket::merge(bm, v12, b10first, b10second,
                          /*shadows=*/{b12first, b11second},
-                          /*keepDeadEntries=*/true,
+                          /*keepTombstoneEntries=*/true,
                          /*countMergeEvents=*/true, clock.getIOContext(),
                          /*doFsync=*/true);
-        REQUIRE(Bucket::getBucketVersion(bucket) == v11);
+        REQUIRE(bucket->getBucketVersion() == v11);
    }
    SECTION("refuse to merge new version with shadow")
    {
        REQUIRE_THROWS_AS(Bucket::merge(bm, v12, b12first, b12second,
                                        /*shadows=*/{b12first},
-                                        /*keepDeadEntries=*/true,
+                                        /*keepTombstoneEntries=*/true,
                                        /*countMergeEvents=*/true,
                                        clock.getIOContext(),
                                        /*doFsync=*/true),
@@ -409,21 +503,25 @@ TEST_CASE("merges refuse to exceed max protocol version",
    auto vers = getAppLedgerVersion(app);
    LedgerEntry liveEntry = generateAccount();
    LedgerEntry otherLiveA = generateDifferentAccount({liveEntry});
-    auto bold1 = Bucket::fresh(bm, vers - 1, {}, {liveEntry}, {},
-                               /*countMergeEvents=*/true, clock.getIOContext(),
-                               /*doFsync=*/true);
-    auto bold2 = Bucket::fresh(bm, vers - 1, {}, {otherLiveA}, {},
-                               /*countMergeEvents=*/true, clock.getIOContext(),
-                               /*doFsync=*/true);
-    auto bnew1 = Bucket::fresh(bm, vers, {}, {liveEntry}, {},
-                               /*countMergeEvents=*/true, clock.getIOContext(),
-                               /*doFsync=*/true);
-    auto bnew2 = Bucket::fresh(bm, vers, {}, {otherLiveA}, {},
-                               /*countMergeEvents=*/true, clock.getIOContext(),
-                               /*doFsync=*/true);
+    auto bold1 =
+        LiveBucket::fresh(bm, vers - 1, {}, {liveEntry}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
+    auto bold2 =
+        LiveBucket::fresh(bm, vers - 1, {}, {otherLiveA}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
+    auto bnew1 =
+        LiveBucket::fresh(bm, vers, {}, {liveEntry}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
+    auto bnew2 =
+        LiveBucket::fresh(bm, vers, {}, {otherLiveA}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
    REQUIRE_THROWS_AS(Bucket::merge(bm, vers - 1, bnew1, bnew2,
                                    /*shadows=*/{},
-                                    /*keepDeadEntries=*/true,
+                                    /*keepTombstoneEntries=*/true,
                                    /*countMergeEvents=*/true,
                                    clock.getIOContext(),
                                    /*doFsync=*/true),
@@ -436,7 +534,7 @@ TEST_CASE("bucket output iterator rejects wrong-version entries",
    VirtualClock clock;
    Config const& cfg = getTestConfig();
    auto vers_new = static_cast<uint32_t>(
-        Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
+        LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
    BucketMetadata meta;
    meta.ledgerVersion = vers_new - 1;
    Application::pointer app = createTestApplication(clock, cfg);
@@ -447,8 +545,8 @@ TEST_CASE("bucket output iterator rejects wrong-version entries",
    metaEntry.type(METAENTRY);
    metaEntry.metaEntry() = meta;
    MergeCounters mc;
-    BucketOutputIterator out(bm.getTmpDir(), true, meta, mc,
-                             clock.getIOContext(), /*doFsync=*/true);
+    LiveBucketOutputIterator out(bm.getTmpDir(), true, meta, mc,
+                                 clock.getIOContext(), /*doFsync=*/true);
    REQUIRE_THROWS_AS(out.put(initEntry), std::runtime_error);
    REQUIRE_THROWS_AS(out.put(metaEntry), std::runtime_error);
}
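Both of the tests above and the INITENTRY tests below pivot on the same protocol-version gates, now hosted on `LiveBucket`. The shape of those checks, for an arbitrary `vers` (a sketch, not patch code):

```cpp
// Era checks used throughout these tests.
bool initEra = protocolVersionStartsFrom(
    vers, LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
bool shadowsRemoved = protocolVersionStartsFrom(
    vers, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED);
// A bucket whose metadata carries a pre-INITENTRY ledgerVersion must not
// contain INITENTRY or METAENTRY records, which is exactly what the two
// REQUIRE_THROWS_AS checks above assert against the output iterator.
```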
@@ -466,7 +564,8 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
        // Whether we're in the era of supporting or not-supporting INITENTRY.
        bool initEra = protocolVersionStartsFrom(
-            vers, Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
+            vers,
+            LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);

        CLOG_INFO(Bucket, "=== finished buckets for initial account == ");
@@ -488,17 +587,17 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
        SECTION("dead and init account entries merge correctly")
        {
-            auto bInit =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto bDead =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto bInit = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
+            auto bDead = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
            auto b1 = Bucket::merge(
                bm, cfg.LEDGER_PROTOCOL_VERSION, bInit, bDead, /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                /*countMergeEvents=*/true, clock.getIOContext(),
                /*doFsync=*/true);
            // In initEra, the INIT will make it through fresh() to the bucket,
@@ -507,7 +606,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
            // fresh(), and that will be killed by the DEAD, leaving 1
            // (tombstone) entry.
            EntryCounts e(b1);
-            CHECK(e.nInit == 0);
+            CHECK(e.nInitOrArchived == 0);
            CHECK(e.nLive == 0);
            if (initEra)
            {
@@ -524,32 +623,32 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
        SECTION("dead and init entries merge with intervening live entries "
                "correctly")
        {
-            auto bInit =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto bLive =
-                Bucket::fresh(bm, vers, {}, {liveEntry}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto bDead =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto bInit = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
+            auto bLive = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
+            auto bDead = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
            auto bmerge1 = Bucket::merge(
                bm, cfg.LEDGER_PROTOCOL_VERSION, bInit, bLive, /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                /*countMergeEvents=*/true, clock.getIOContext(),
                /*doFsync=*/true);
            auto b1 = Bucket::merge(
                bm, cfg.LEDGER_PROTOCOL_VERSION, bmerge1, bDead, /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                /*countMergeEvents=*/true, clock.getIOContext(),
                /*doFsync=*/true);
            // The same thing should happen here as above, except that the INIT
            // will merge-over the LIVE during fresh().
            EntryCounts e(b1);
-            CHECK(e.nInit == 0);
+            CHECK(e.nInitOrArchived == 0);
            CHECK(e.nLive == 0);
            if (initEra)
            {
@@ -566,25 +665,25 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
        SECTION("dead and init entries annihilate multiple live entries via "
                "separate buckets")
        {
-            auto bold =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto bmed = Bucket::fresh(
+            auto bold = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                          /*countMergeEvents=*/true,
+                                          clock.getIOContext(),
+                                          /*doFsync=*/true);
+            auto bmed = LiveBucket::fresh(
                bm, vers, {}, {otherLiveA, otherLiveB, liveEntry, otherLiveC},
                {},
                /*countMergeEvents=*/true, clock.getIOContext(),
                /*doFsync=*/true);
-            auto bnew =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto bnew = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                          /*countMergeEvents=*/true,
+                                          clock.getIOContext(),
+                                          /*doFsync=*/true);
            EntryCounts eold(bold), emed(bmed), enew(bnew);
            if (initEra)
            {
                CHECK(eold.nMeta == 1);
                CHECK(emed.nMeta == 1);
                CHECK(enew.nMeta == 1);
-                CHECK(eold.nInit == 1);
+                CHECK(eold.nInitOrArchived == 1);
                CHECK(eold.nLive == 0);
            }
            else
@@ -592,35 +691,35 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
            {
                CHECK(eold.nMeta == 0);
                CHECK(emed.nMeta == 0);
                CHECK(enew.nMeta == 0);
-                CHECK(eold.nInit == 0);
+                CHECK(eold.nInitOrArchived == 0);
                CHECK(eold.nLive == 1);
            }

            CHECK(eold.nDead == 0);
-            CHECK(emed.nInit == 0);
+            CHECK(emed.nInitOrArchived == 0);
            CHECK(emed.nLive == 4);
            CHECK(emed.nDead == 0);
-            CHECK(enew.nInit == 0);
+            CHECK(enew.nInitOrArchived == 0);
            CHECK(enew.nLive == 0);
            CHECK(enew.nDead == 1);

            auto bmerge1 = Bucket::merge(
                bm, cfg.LEDGER_PROTOCOL_VERSION, bold, bmed, /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                /*countMergeEvents=*/true, clock.getIOContext(),
                /*doFsync=*/true);
            auto bmerge2 = Bucket::merge(
                bm, cfg.LEDGER_PROTOCOL_VERSION, bmerge1, bnew, /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                /*countMergeEvents=*/true, clock.getIOContext(),
                /*doFsync=*/true);
            EntryCounts emerge1(bmerge1), emerge2(bmerge2);
            if (initEra)
            {
                CHECK(emerge1.nMeta == 1);
-                CHECK(emerge1.nInit == 1);
+                CHECK(emerge1.nInitOrArchived == 1);
                CHECK(emerge1.nLive == 3);

                CHECK(emerge2.nMeta == 1);
@@ -629,14 +728,14 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
            else
            {
                CHECK(emerge1.nMeta == 0);
-                CHECK(emerge1.nInit == 0);
+                CHECK(emerge1.nInitOrArchived == 0);
                CHECK(emerge1.nLive == 4);

                CHECK(emerge2.nMeta == 0);
                CHECK(emerge2.nDead == 1);
            }
            CHECK(emerge1.nDead == 0);
-            CHECK(emerge2.nInit == 0);
+            CHECK(emerge2.nInitOrArchived == 0);
            CHECK(emerge2.nLive == 3);
        }
    });
}
@@ -655,7 +754,8 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
        // Whether we're in the era of supporting or not-supporting INITENTRY.
        bool initEra = protocolVersionStartsFrom(
-            vers, Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
+            vers,
+            LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);

        CLOG_INFO(Bucket, "=== finished buckets for initial account == ");
@@ -680,36 +780,36 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            // In pre-11 versions, shadows _do_ eliminate lifecycle entries
            // (INIT/DEAD). In 11-and-after versions, shadows _don't_ eliminate
            // lifecycle entries.
-            auto shadow =
-                Bucket::fresh(bm, vers, {}, {liveEntry}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto b1 =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto b2 =
-                Bucket::fresh(bm, vers, {otherInitA}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto shadow = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto b1 = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                        /*countMergeEvents=*/true,
+                                        clock.getIOContext(),
+                                        /*doFsync=*/true);
+            auto b2 = LiveBucket::fresh(bm, vers, {otherInitA}, {}, {},
+                                        /*countMergeEvents=*/true,
+                                        clock.getIOContext(),
+                                        /*doFsync=*/true);
            auto merged =
                Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, b1, b2,
                              /*shadows=*/{shadow},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                              /*countMergeEvents=*/true, clock.getIOContext(),
                              /*doFsync=*/true);
            EntryCounts e(merged);
            if (initEra)
            {
                CHECK(e.nMeta == 1);
-                CHECK(e.nInit == 2);
+                CHECK(e.nInitOrArchived == 2);
                CHECK(e.nLive == 0);
                CHECK(e.nDead == 0);
            }
            else
            {
                CHECK(e.nMeta == 0);
-                CHECK(e.nInit == 0);
+                CHECK(e.nInitOrArchived == 0);
                CHECK(e.nLive == 1);
                CHECK(e.nDead == 0);
            }
@@ -722,26 +822,26 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            // INIT. See comment in `maybePut` in Bucket.cpp.
            //
            // (level1 is newest here, level5 is oldest)
-            auto level1 =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level2 =
-                Bucket::fresh(bm, vers, {initEntry2}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level3 =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level4 =
-                Bucket::fresh(bm, vers, {}, {}, {}, /*countMergeEvents=*/true,
-                              clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level5 =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto level1 = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level2 = LiveBucket::fresh(bm, vers, {initEntry2}, {}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level3 = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level4 = LiveBucket::fresh(bm, vers, {}, {}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level5 = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);

            // Do a merge between levels 4 and 3, with shadows from 2 and 1,
            // risking shadowing-out level 3. Level 4 is a placeholder here,
@@ -750,7 +850,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            auto merge43 =
                Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level4, level3,
                              /*shadows=*/{level2, level1},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                              /*countMergeEvents=*/true, clock.getIOContext(),
                              /*doFsync=*/true);
            EntryCounts e43(merge43);
            if (initEra)
            {
                // New-style, we preserve the dead entry.
                CHECK(e43.nMeta == 1);
-                CHECK(e43.nInit == 0);
+                CHECK(e43.nInitOrArchived == 0);
                CHECK(e43.nLive == 0);
                CHECK(e43.nDead == 1);
            }
@@ -766,7 +866,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            {
                // Old-style, we shadowed-out the dead entry.
                CHECK(e43.nMeta == 0);
-                CHECK(e43.nInit == 0);
+                CHECK(e43.nInitOrArchived == 0);
                CHECK(e43.nLive == 0);
                CHECK(e43.nDead == 0);
            }
@@ -776,7 +876,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            auto merge21 =
                Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level2, level1,
                              /*shadows=*/{},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                              /*countMergeEvents=*/true, clock.getIOContext(),
                              /*doFsync=*/true);
            EntryCounts e21(merge21);
            if (initEra)
            {
                // New-style, they mutually annihilate.
                CHECK(e21.nMeta == 1);
-                CHECK(e21.nInit == 0);
+                CHECK(e21.nInitOrArchived == 0);
                CHECK(e21.nLive == 0);
                CHECK(e21.nDead == 0);
            }
@@ -792,7 +892,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            {
                // Old-style, we keep the tombstone around.
                CHECK(e21.nMeta == 0);
-                CHECK(e21.nInit == 0);
+                CHECK(e21.nInitOrArchived == 0);
                CHECK(e21.nLive == 0);
                CHECK(e21.nDead == 1);
            }
@@ -802,13 +902,13 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            auto merge4321 =
                Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, merge43, merge21,
                              /*shadows=*/{},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                              /*countMergeEvents=*/true, clock.getIOContext(),
                              /*doFsync=*/true);
            auto merge54321 = Bucket::merge(
                bm, cfg.LEDGER_PROTOCOL_VERSION, level5, merge4321,
                /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                /*countMergeEvents=*/true, clock.getIOContext(),
                /*doFsync=*/true);
            EntryCounts e54321(merge21);
@@ -816,7 +916,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            if (initEra)
            {
                // New-style, we should get a second mutual annihilation.
                CHECK(e54321.nMeta == 1);
-                CHECK(e54321.nInit == 0);
+                CHECK(e54321.nInitOrArchived == 0);
                CHECK(e54321.nLive == 0);
                CHECK(e54321.nDead == 0);
            }
@@ -824,7 +924,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            {
                // Old-style, the tombstone should clobber the live entry.
                CHECK(e54321.nMeta == 0);
-                CHECK(e54321.nInit == 0);
+                CHECK(e54321.nInitOrArchived == 0);
                CHECK(e54321.nLive == 0);
                CHECK(e54321.nDead == 1);
            }
@@ -839,18 +939,18 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            // `maybePut` in Bucket.cpp.
            //
            // (level1 is newest here, level3 is oldest)
-            auto level1 =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level2 =
-                Bucket::fresh(bm, vers, {}, {liveEntry}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level3 =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto level1 = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level2 = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level3 = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);

            // Do a merge between levels 3 and 2, with shadow from 1, risking
            // shadowing-out the init on level 3. Level 2 is a placeholder here,
@@ -859,7 +959,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            auto merge32 =
                Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level3, level2,
                              /*shadows=*/{level1},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                              /*countMergeEvents=*/true, clock.getIOContext(),
                              /*doFsync=*/true);
            EntryCounts e32(merge32);
@@ -867,7 +967,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            if (initEra)
            {
                // New-style, we preserve the init entry.
                CHECK(e32.nMeta == 1);
-                CHECK(e32.nInit == 1);
+                CHECK(e32.nInitOrArchived == 1);
                CHECK(e32.nLive == 0);
                CHECK(e32.nDead == 0);
            }
@@ -875,7 +975,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            {
                // Old-style, we shadowed-out the live and init entries.
                CHECK(e32.nMeta == 0);
-                CHECK(e32.nInit == 0);
+                CHECK(e32.nInitOrArchived == 0);
                CHECK(e32.nLive == 0);
                CHECK(e32.nDead == 0);
            }
@@ -886,7 +986,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            auto merge321 =
                Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, merge32, level1,
                              /*shadows=*/{},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                              /*countMergeEvents=*/true, clock.getIOContext(),
                              /*doFsync=*/true);
            EntryCounts e321(merge321);
@@ -894,7 +994,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            if (initEra)
            {
                // New-style, init meets dead and they annihilate.
                CHECK(e321.nMeta == 1);
-                CHECK(e321.nInit == 0);
+                CHECK(e321.nInitOrArchived == 0);
                CHECK(e321.nLive == 0);
                CHECK(e321.nDead == 0);
            }
@@ -903,7 +1003,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
            {
                // Old-style, init was already shadowed-out, so dead
                // accumulates.
                CHECK(e321.nMeta == 0);
-                CHECK(e321.nInit == 0);
+                CHECK(e321.nInitOrArchived == 0);
                CHECK(e321.nLive == 0);
                CHECK(e321.nDead == 1);
            }
@@ -911,51 +1011,6 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
    });
}
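All three shadow scenarios above reduce to one rule in `maybePut`: pre-protocol-11 merges let shadows drop anything, including INIT/DEAD lifecycle entries, while later merges only shadow plain LIVEENTRYs so that INIT/DEAD annihilation still works across levels. A condensed restatement (an illustrative helper, not the real `maybePut` signature):

```cpp
// Sketch: whether a shadow is allowed to drop an entry of this type.
bool
shadowEliminates(BucketEntryType type, uint32_t mergeProtocolVersion)
{
    bool preInitEra = protocolVersionIsBefore(
        mergeProtocolVersion,
        LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
    return preInitEra || type == LIVEENTRY;
}
```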

-TEST_CASE_VERSIONS("legacy bucket apply", "[bucket]")
-{
-    VirtualClock clock;
-    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
-    for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
-        Application::pointer app = createTestApplication(clock, cfg);
-
-        std::vector<LedgerEntry> live(10), noLive;
-        std::vector<LedgerKey> dead, noDead;
-
-        for (auto& e : live)
-        {
-            e.data.type(ACCOUNT);
-            auto& a = e.data.account();
-            a = LedgerTestUtils::generateValidAccountEntry(5);
-            a.balance = 1000000000;
-            dead.emplace_back(LedgerEntryKey(e));
-        }
-
-        std::shared_ptr<Bucket> birth = Bucket::fresh(
-            app->getBucketManager(), getAppLedgerVersion(app), {}, live, noDead,
-            /*countMergeEvents=*/true, clock.getIOContext(),
-            /*doFsync=*/true);
-
-        std::shared_ptr<Bucket> death = Bucket::fresh(
-            app->getBucketManager(), getAppLedgerVersion(app), {}, noLive, dead,
-            /*countMergeEvents=*/true, clock.getIOContext(),
-            /*doFsync=*/true);
-
-        CLOG_INFO(Bucket, "Applying bucket with {} live entries", live.size());
-        birth->apply(*app);
-        {
-            auto count = app->getLedgerTxnRoot().countObjects(ACCOUNT);
-            REQUIRE(count == live.size() + 1 /* root account */);
-        }
-
-        CLOG_INFO(Bucket, "Applying bucket with {} dead entries", dead.size());
-        death->apply(*app);
-        {
-            auto count = app->getLedgerTxnRoot().countObjects(ACCOUNT);
-            REQUIRE(count == 1 /* root account */);
-        }
-    });
-}
-
TEST_CASE("bucket apply bench", "[bucketbench][!hide]")
{
    auto runtest = [](Config::TestDbMode mode) {
@@ -973,7 +1028,7 @@ TEST_CASE("bucket apply bench", "[bucketbench][!hide]")
            a = LedgerTestUtils::generateValidAccountEntry(5);
        }

-        std::shared_ptr<Bucket> birth = Bucket::fresh(
+        std::shared_ptr<LiveBucket> birth = LiveBucket::fresh(
            app->getBucketManager(), getAppLedgerVersion(app), {}, live, noDead,
            /*countMergeEvents=*/true, clock.getIOContext(),
            /*doFsync=*/true);
diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp
index 8a871b5f31..cd89b5e79b 100644
--- a/src/catchup/ApplyBucketsWork.cpp
+++ b/src/catchup/ApplyBucketsWork.cpp
@@ -51,45 +51,30 @@ class TempLedgerVersionSetter : NonMovableOrCopyable
    }
};

-uint32_t
-ApplyBucketsWork::startingLevel()
-{
-    return mApp.getConfig().isUsingBucketListDB() ? 0
-                                                  : BucketList::kNumLevels - 1;
-}
-
ApplyBucketsWork::ApplyBucketsWork(
    Application& app,
-    std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
-    HistoryArchiveState const& applyState, uint32_t maxProtocolVersion,
-    std::function<bool(LedgerEntryType)> onlyApply)
+    std::map<std::string, std::shared_ptr<LiveBucket>> const& buckets,
+    HistoryArchiveState const& applyState, uint32_t maxProtocolVersion)
    : Work(app, "apply-buckets", BasicWork::RETRY_NEVER)
    , mBuckets(buckets)
    , mApplyState(applyState)
-    , mEntryTypeFilter(onlyApply)
    , mTotalSize(0)
-    , mLevel(startingLevel())
+    , mLevel(0)
    , mMaxProtocolVersion(maxProtocolVersion)
    , mCounters(app.getClock().now())
+    , mIsApplyInvariantEnabled(
+          app.getInvariantManager().isBucketApplyInvariantEnabled())
{
}

-ApplyBucketsWork::ApplyBucketsWork(
-    Application& app,
-    std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
-    HistoryArchiveState const& applyState, uint32_t maxProtocolVersion)
-    : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion,
-                       [](LedgerEntryType) { return true; })
-{
-}
-
-std::shared_ptr<Bucket>
+std::shared_ptr<LiveBucket>
ApplyBucketsWork::getBucket(std::string const& hash)
{
    auto i = mBuckets.find(hash);
-    auto b = (i != mBuckets.end())
-                 ? i->second
-                 : mApp.getBucketManager().getBucketByHash(hexToBin256(hash));
+    auto b =
+        (i != mBuckets.end())
+            ? i->second
+            : mApp.getBucketManager().getLiveBucketByHash(hexToBin256(hash));
    releaseAssert(b);
    return b;
}
@@ -109,40 +94,19 @@ ApplyBucketsWork::doReset()
    mLastPos = 0;
    mBucketToApplyIndex = 0;
    mMinProtocolVersionSeen = UINT32_MAX;
+    mSeenKeysBeforeApply.clear();
    mSeenKeys.clear();
    mBucketsToApply.clear();
    mBucketApplicator.reset();

    if (!isAborting())
    {
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            // The current size of this set is 1.6 million during BucketApply
-            // (as of 12/20/23). There's not a great way to estimate this, so
-            // reserving with some extra wiggle room
-            mSeenKeys.reserve(2'000'000);
-        }
+        // The current size of this set is 1.6 million during BucketApply
+        // (as of 12/20/23). There's not a great way to estimate this, so
+        // reserving with some extra wiggle room
+        mSeenKeys.reserve(2'000'000);

-        // When applying buckets with accounts, we have to make sure that the
-        // root account has been removed. This comes into play, for example,
-        // when applying buckets from genesis the root account already exists.
-        if (mEntryTypeFilter(ACCOUNT))
-        {
-            TempLedgerVersionSetter tlvs(mApp, mMaxProtocolVersion);
-            {
-                SecretKey skey = SecretKey::fromSeed(mApp.getNetworkID());
-
-                LedgerTxn ltx(mApp.getLedgerTxnRoot());
-                auto rootAcc = loadAccount(ltx, skey.getPublicKey());
-                if (rootAcc)
-                {
-                    rootAcc.erase();
-                }
-                ltx.commit();
-            }
-        }
-
-        auto addBucket = [this](std::shared_ptr<Bucket> const& bucket) {
+        auto addBucket = [this](std::shared_ptr<LiveBucket> const& bucket) {
            if (bucket->getSize() > 0)
            {
                mTotalBuckets++;
@@ -150,30 +114,16 @@ ApplyBucketsWork::doReset()
            }
            mBucketsToApply.emplace_back(bucket);
        };
-        // If using bucketlist DB, we iterate through the BucketList in order
-        // (i.e. L0 curr, L0 snap, L1 curr, etc) as we are just applying offers
-        // (and can keep track of all seen keys). Otherwise, we iterate in
-        // reverse order (i.e. L N snap, L N curr, L N-1 snap, etc.) as we are
-        // applying all entry types and cannot keep track of all seen keys as it
-        // would be too large.
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            for (auto const& hsb : mApplyState.currentBuckets)
-            {
-                addBucket(getBucket(hsb.curr));
-                addBucket(getBucket(hsb.snap));
-            }
-        }
-        else
+
+        // We iterate through the live BucketList in
+        // order (i.e. L0 curr, L0 snap, L1 curr, etc) as we are just applying
+        // offers (and can keep track of all seen keys).
+        for (auto const& hsb : mApplyState.currentBuckets)
        {
-            for (auto iter = mApplyState.currentBuckets.rbegin();
-                 iter != mApplyState.currentBuckets.rend(); ++iter)
-            {
-                auto const& hsb = *iter;
-                addBucket(getBucket(hsb.snap));
-                addBucket(getBucket(hsb.curr));
-            }
+            addBucket(getBucket(hsb.curr));
+            addBucket(getBucket(hsb.snap));
        }
+
        // estimate the number of ledger entries contained in those buckets
        // use accounts as a rough approximator as to overestimate a bit
        // (default BucketEntry contains a default AccountEntry)
@@ -198,11 +148,19 @@ ApplyBucketsWork::startBucket()
    ZoneScoped;
    auto bucket = mBucketsToApply.at(mBucketToApplyIndex);
    mMinProtocolVersionSeen =
-        std::min(mMinProtocolVersionSeen, Bucket::getBucketVersion(bucket));
+        std::min(mMinProtocolVersionSeen, bucket->getBucketVersion());
+
+    // Take a snapshot of seen keys before applying the bucket, only if
+    // invariants are enabled since this is expensive.
+    if (mIsApplyInvariantEnabled)
+    {
+        mSeenKeysBeforeApply = mSeenKeys;
+    }
+
    // Create a new applicator for the bucket.
    mBucketApplicator = std::make_unique<BucketApplicator>(
        mApp, mMaxProtocolVersion, mMinProtocolVersionSeen, mLevel, bucket,
-        mEntryTypeFilter, mSeenKeys);
+        mSeenKeys);
}

void
@@ -213,54 +171,36 @@ ApplyBucketsWork::prepareForNextBucket()
    mApp.getCatchupManager().bucketsApplied();
    mBucketToApplyIndex++;
    // If mBucketToApplyIndex is even, we are progressing to the next
-    // level, if we are using BucketListDB, this is the next greater
-    // level, otherwise it's the next lower level.
+    // level
    if (mBucketToApplyIndex % 2 == 0)
    {
-        mLevel =
-            mApp.getConfig().isUsingBucketListDB() ? mLevel + 1 : mLevel - 1;
+        ++mLevel;
    }
}

-// We iterate through the BucketList either in-order (level 0 curr, level 0
-// snap, level 1 curr, etc) when only applying offers, or in reverse order
-// (level 9 curr, level 8 snap, level 8 curr, etc) when applying all entry
-// types. When only applying offers, we keep track of the keys we have already
+// We iterate through the live BucketList in order (level 0 curr, level 0
+// snap, level 1 curr, etc). We keep track of the keys we have already
// seen, and only apply an entry to the DB if it has not been seen before. This
// allows us to perform a single write to the DB and ensure that only the newest
// version is written.
//
-// When applying all entry types, this seen keys set would be too large. Since
-// there can be no seen keys set, if we were to apply every entry in order, we
-// would overwrite the newest version of an entry with an older version as we
-// iterate through the BucketList. Due to this, we iterate in reverse order such
-// that the newest version of a key is written last, overwriting the older
-// versions. This is much slower due to DB churn.
-
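The comment above describes the whole de-duplication scheme: within the live BucketList's order, the first occurrence of a key is its newest version, so later occurrences are skipped rather than allowed to overwrite it. A minimal sketch of that shape, with `bucketEntriesNewestFirst` and `applyToDatabase` as hypothetical stand-ins for the applicator's internals:

```cpp
// Illustrative only: first-seen wins, so each key is written exactly once.
std::unordered_set<LedgerKey> seen;
for (auto const& entry : bucketEntriesNewestFirst)
{
    auto key = LedgerEntryKey(entry);
    if (seen.insert(key).second)
    {
        applyToDatabase(entry); // hypothetical helper
    }
}
```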
BasicWork::State
ApplyBucketsWork::doWork()
{
    ZoneScoped;

    // Step 1: index buckets. Step 2: apply buckets. Step 3: assume state
-    bool isUsingBucketListDB = mApp.getConfig().isUsingBucketListDB();
-    if (isUsingBucketListDB)
+    if (!mIndexBucketsWork)
    {
-        // Step 1: index buckets.
-        if (!mIndexBucketsWork)
-        {
-            // Spawn indexing work for the first time
-            mIndexBucketsWork = addWork<IndexBucketsWork>(mBucketsToApply);
-            return State::WORK_RUNNING;
-        }
-        else if (mIndexBucketsWork->getState() !=
-                 BasicWork::State::WORK_SUCCESS)
-        {
-            // Exit early if indexing work is still running, or failed
-            return mIndexBucketsWork->getState();
-        }
+        // Spawn indexing work for the first time
+        mIndexBucketsWork = addWork<IndexBucketsWork>(mBucketsToApply);
+        return State::WORK_RUNNING;
+    }
-        // Otherwise, continue with next steps
+    else if (mIndexBucketsWork->getState() != BasicWork::State::WORK_SUCCESS)
+    {
+        // Exit early if indexing work is still running, or failed
+        return mIndexBucketsWork->getState();
    }

    if (!mAssumeStateWork)
@@ -280,8 +220,7 @@ ApplyBucketsWork::doWork()
        }
    }

-    auto isCurr = isUsingBucketListDB ? mBucketToApplyIndex % 2 == 0
-                                      : mBucketToApplyIndex % 2 == 1;
+    auto isCurr = mBucketToApplyIndex % 2 == 0;
    if (mBucketApplicator)
    {
        TempLedgerVersionSetter tlvs(mApp, mMaxProtocolVersion);
@@ -292,10 +231,13 @@ ApplyBucketsWork::doWork()
            return State::WORK_RUNNING;
        }
        // Application complete, check invariants and prepare for next
-        // bucket.
+        // bucket. Applying a bucket updates mSeenKeys with the keys applied
+        // by that bucket, so we need to provide a copy of the keys before
+        // application to the invariant check.
        mApp.getInvariantManager().checkOnBucketApply(
            mBucketsToApply.at(mBucketToApplyIndex),
-            mApplyState.currentLedger, mLevel, isCurr, mEntryTypeFilter);
+            mApplyState.currentLedger, mLevel, isCurr,
+            mSeenKeysBeforeApply);
        prepareForNextBucket();
    }
    if (!appliedAllBuckets())
@@ -365,8 +307,7 @@ ApplyBucketsWork::getStatus() const
    // This status string only applies to step 2 when we actually apply the
    // buckets.
-    bool doneIndexing = !mApp.getConfig().isUsingBucketListDB() ||
-                        (mIndexBucketsWork && mIndexBucketsWork->isDone());
+    bool doneIndexing = mIndexBucketsWork && mIndexBucketsWork->isDone();
    if (doneIndexing && !mSpawnedAssumeStateWork)
    {
        auto size = mTotalSize == 0 ? 0 : (100 * mAppliedSize / mTotalSize);
diff --git a/src/catchup/ApplyBucketsWork.h b/src/catchup/ApplyBucketsWork.h
index 276e4caa05..bdff18bed1 100644
--- a/src/catchup/ApplyBucketsWork.h
+++ b/src/catchup/ApplyBucketsWork.h
@@ -12,8 +12,7 @@ namespace stellar
{

class AssumeStateWork;
-class BucketLevel;
-class BucketList;
+class LiveBucketList;
class Bucket;
class IndexBucketsWork;
struct HistoryArchiveState;
@@ -21,9 +20,8 @@ struct LedgerHeaderHistoryEntry;

class ApplyBucketsWork : public Work
{
-    std::map<std::string, std::shared_ptr<Bucket>> const& mBuckets;
+    std::map<std::string, std::shared_ptr<LiveBucket>> const& mBuckets;
    HistoryArchiveState const& mApplyState;
-    std::function<bool(LedgerEntryType)> mEntryTypeFilter;

    bool mSpawnedAssumeStateWork{false};
    std::shared_ptr<AssumeStateWork> mAssumeStateWork{};
@@ -39,17 +37,18 @@ class ApplyBucketsWork : public Work
    uint32_t mLevel{0};
    uint32_t mMaxProtocolVersion{0};
    uint32_t mMinProtocolVersionSeen{UINT32_MAX};
+    std::unordered_set<LedgerKey> mSeenKeysBeforeApply;
    std::unordered_set<LedgerKey> mSeenKeys;
-    std::vector<std::shared_ptr<Bucket>> mBucketsToApply;
+    std::vector<std::shared_ptr<LiveBucket>> mBucketsToApply;
    std::unique_ptr<BucketApplicator> mBucketApplicator;

    bool mDelayChecked{false};
    BucketApplicator::Counters mCounters;
+    bool const mIsApplyInvariantEnabled;

    void advance(std::string const& name, BucketApplicator& applicator);
-    std::shared_ptr<Bucket> getBucket(std::string const& bucketHash);
+    std::shared_ptr<LiveBucket> getBucket(std::string const& bucketHash);

-    uint32_t startingLevel();
    bool appliedAllBuckets() const;
    void startBucket();
    void prepareForNextBucket();
@@ -57,13 +56,8 @@ class ApplyBucketsWork : public Work
public:
    ApplyBucketsWork(
        Application& app,
-        std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
+        std::map<std::string, std::shared_ptr<LiveBucket>> const& buckets,
        HistoryArchiveState const& applyState, uint32_t maxProtocolVersion);
-    ApplyBucketsWork(
-        Application& app,
-        std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
-        HistoryArchiveState const& applyState, uint32_t maxProtocolVersion,
-        std::function<bool(LedgerEntryType)> onlyApply);
    ~ApplyBucketsWork() = default;

    std::string getStatus() const override;
diff --git a/src/catchup/ApplyBufferedLedgersWork.cpp b/src/catchup/ApplyBufferedLedgersWork.cpp
index 6af378daf4..72d396c5a2 100644
--- a/src/catchup/ApplyBufferedLedgersWork.cpp
+++ b/src/catchup/ApplyBufferedLedgersWork.cpp
@@ -59,7 +59,7 @@ ApplyBufferedLedgersWork::onRun()
    auto applyLedger = std::make_shared<ApplyLedgerWork>(mApp, lcd);

    auto predicate = [](Application& app) {
-        auto& bl = app.getBucketManager().getBucketList();
+        auto& bl = app.getBucketManager().getLiveBucketList();
        auto& lm = app.getLedgerManager();
        bl.resolveAnyReadyFutures();
        return bl.futuresAllResolved(
diff --git a/src/catchup/ApplyCheckpointWork.cpp b/src/catchup/ApplyCheckpointWork.cpp
index 86f6bf01b5..ad51ded4d0 100644
--- a/src/catchup/ApplyCheckpointWork.cpp
+++ b/src/catchup/ApplyCheckpointWork.cpp
@@ -311,7 +311,7 @@ ApplyCheckpointWork::onRun()
    auto applyLedger = std::make_shared<ApplyLedgerWork>(mApp, *lcd);

    auto predicate = [](Application& app) {
-        auto& bl = app.getBucketManager().getBucketList();
+        auto& bl = app.getBucketManager().getLiveBucketList();
        auto& lm = app.getLedgerManager();
        bl.resolveAnyReadyFutures();
        return bl.futuresAllResolved(
diff --git a/src/catchup/AssumeStateWork.cpp b/src/catchup/AssumeStateWork.cpp
index e12ed8ac98..1305ccc711 100644
--- a/src/catchup/AssumeStateWork.cpp
+++ b/src/catchup/AssumeStateWork.cpp
@@ -26,12 +26,12 @@ AssumeStateWork::AssumeStateWork(Application& app,
    // Maintain reference to all Buckets in HAS to avoid garbage collection,
    // including future buckets that have already finished merging
    auto& bm = mApp.getBucketManager();
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
    {
        auto curr =
-            bm.getBucketByHash(hexToBin256(mHas.currentBuckets.at(i).curr));
+            bm.getLiveBucketByHash(hexToBin256(mHas.currentBuckets.at(i).curr));
        auto snap =
-            bm.getBucketByHash(hexToBin256(mHas.currentBuckets.at(i).snap));
+            bm.getLiveBucketByHash(hexToBin256(mHas.currentBuckets.at(i).snap));
        if (!(curr && snap))
        {
            throw std::runtime_error("Missing bucket files while "
@@ -44,7 +44,7 @@ AssumeStateWork::AssumeStateWork(Application& app,
        if (nextFuture.hasOutputHash())
        {
            auto nextBucket =
-                bm.getBucketByHash(hexToBin256(nextFuture.getOutputHash()));
+                bm.getLiveBucketByHash(hexToBin256(nextFuture.getOutputHash()));
            if (!nextBucket)
            {
                throw std::runtime_error("Missing future bucket files while "
@@ -64,10 +64,7 @@ AssumeStateWork::doWork()
        std::vector<std::shared_ptr<BasicWork>> seq;

        // Index Bucket files
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            seq.push_back(std::make_shared<IndexBucketsWork>(mApp, mBuckets));
-        }
+        seq.push_back(std::make_shared<IndexBucketsWork>(mApp, mBuckets));

        // Add bucket files to BucketList and restart merges
        auto assumeStateCB = [&has = mHas,
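The constructor above exists mostly for the retention comment at its top: holding a `shared_ptr` is what keeps a bucket's file alive, and dropping the last reference makes it eligible for garbage collection by the BucketManager. In isolation, the idiom looks like this (variables `bm`, `has`, and `i` assumed from the surrounding constructor):

```cpp
// Pin each referenced bucket for the lifetime of the work, so the files
// cannot be GC'd out from under indexing or assume-state.
std::vector<std::shared_ptr<LiveBucket>> retained;
auto curr = bm.getLiveBucketByHash(hexToBin256(has.currentBuckets.at(i).curr));
if (!curr)
{
    throw std::runtime_error("Missing bucket files while assuming state");
}
retained.emplace_back(curr);
```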
diff --git a/src/catchup/AssumeStateWork.h b/src/catchup/AssumeStateWork.h
index 689cc1f1f6..92dc4b903c 100644
--- a/src/catchup/AssumeStateWork.h
+++ b/src/catchup/AssumeStateWork.h
@@ -11,6 +11,7 @@ namespace stellar
class Bucket;
struct HistoryArchiveState;
+class LiveBucket;

class AssumeStateWork : public Work
{
@@ -21,7 +22,7 @@ class AssumeStateWork : public Work

    // Keep strong reference to buckets in HAS so they are not garbage
    // collected during indexing
-    std::vector<std::shared_ptr<Bucket>> mBuckets{};
+    std::vector<std::shared_ptr<LiveBucket>> mBuckets{};

public:
    AssumeStateWork(Application& app, HistoryArchiveState const& has,
diff --git a/src/catchup/CatchupManager.h b/src/catchup/CatchupManager.h
index 61c9b5821f..46318c5e98 100644
--- a/src/catchup/CatchupManager.h
+++ b/src/catchup/CatchupManager.h
@@ -63,7 +63,7 @@ class CatchupManager
    virtual void
    startCatchup(CatchupConfiguration configuration,
                 std::shared_ptr<HistoryArchive> archive,
-                 std::set<std::shared_ptr<Bucket>> bucketsToRetain) = 0;
+                 std::set<std::shared_ptr<LiveBucket>> bucketsToRetain) = 0;

    // Return status of catchup for or empty string, if no catchup in progress
    virtual std::string getStatus() const = 0;
diff --git a/src/catchup/CatchupManagerImpl.cpp b/src/catchup/CatchupManagerImpl.cpp
index b1eca69dd7..11db47260f 100644
--- a/src/catchup/CatchupManagerImpl.cpp
+++ b/src/catchup/CatchupManagerImpl.cpp
@@ -238,7 +238,7 @@ CatchupManagerImpl::processLedger(LedgerCloseData const& ledgerData)
void
CatchupManagerImpl::startCatchup(
    CatchupConfiguration configuration, std::shared_ptr<HistoryArchive> archive,
-    std::set<std::shared_ptr<Bucket>> bucketsToRetain)
+    std::set<std::shared_ptr<LiveBucket>> bucketsToRetain)
{
    ZoneScoped;
    auto lastClosedLedger = mApp.getLedgerManager().getLastClosedLedgerNum();
diff --git a/src/catchup/CatchupManagerImpl.h b/src/catchup/CatchupManagerImpl.h
index 8c04a344aa..90e57bcbd7 100644
--- a/src/catchup/CatchupManagerImpl.h
+++ b/src/catchup/CatchupManagerImpl.h
@@ -62,10 +62,10 @@ class CatchupManagerImpl : public CatchupManager
    ~CatchupManagerImpl() override;

    void processLedger(LedgerCloseData const& ledgerData) override;
-    void
-    startCatchup(CatchupConfiguration configuration,
-                 std::shared_ptr<HistoryArchive> archive,
-                 std::set<std::shared_ptr<Bucket>> bucketsToRetain) override;
+    void startCatchup(
+        CatchupConfiguration configuration,
+        std::shared_ptr<HistoryArchive> archive,
+        std::set<std::shared_ptr<LiveBucket>> bucketsToRetain) override;

    std::string getStatus() const override;
diff --git a/src/catchup/CatchupWork.cpp b/src/catchup/CatchupWork.cpp
index 760c15436c..b854e80414 100644
--- a/src/catchup/CatchupWork.cpp
+++ b/src/catchup/CatchupWork.cpp
@@ -77,7 +77,7 @@ setHerderStateTo(FileTransferInfo const& ft, uint32_t ledger, Application& app)

CatchupWork::CatchupWork(Application& app,
                         CatchupConfiguration catchupConfiguration,
-                         std::set<std::shared_ptr<Bucket>> bucketsToRetain,
+                         std::set<std::shared_ptr<LiveBucket>> bucketsToRetain,
                         std::shared_ptr<HistoryArchive> archive)
    : Work(app, "catchup", BasicWork::RETRY_NEVER)
    , mLocalState{app.getLedgerManager().getLastClosedLedgerHAS()}
@@ -214,10 +214,7 @@ CatchupWork::downloadApplyBuckets()
    // the database. This guarantees that we clear that state the next time
    // the application starts.
    auto& ps = mApp.getPersistentState();
-    for (auto let : xdr::xdr_traits<LedgerEntryType>::enum_values())
-    {
-        ps.setRebuildForType(static_cast<LedgerEntryType>(let));
-    }
+    ps.setRebuildForOfferTable();

    std::vector<std::shared_ptr<BasicWork>> seq;
    auto version = mApp.getConfig().LEDGER_PROTOCOL_VERSION;
@@ -245,20 +242,8 @@ CatchupWork::downloadApplyBuckets()
        version = mVerifiedLedgerRangeStart.header.ledgerVersion;
    }

-    std::shared_ptr<ApplyBucketsWork> applyBuckets;
-    if (mApp.getConfig().isUsingBucketListDB())
-    {
-        // Only apply unsupported BucketListDB types to SQL DB when BucketList
-        // lookup is enabled
-        applyBuckets = std::make_shared<ApplyBucketsWork>(
-            mApp, mBuckets, *mBucketHAS, version,
-            BucketIndex::typeNotSupported);
-    }
-    else
-    {
-        applyBuckets = std::make_shared<ApplyBucketsWork>(mApp, mBuckets,
-                                                          *mBucketHAS, version);
-    }
+    auto applyBuckets = std::make_shared<ApplyBucketsWork>(
+        mApp, mBuckets, *mBucketHAS, version);
    seq.push_back(applyBuckets);
    return std::make_shared<WorkSequence>(mApp, "download-verify-apply-buckets",
                                          seq, RETRY_NEVER);
@@ -531,10 +516,7 @@ CatchupWork::runCatchupStep()
                // persistently available locally so it will return us to the
                // correct state.
auto& ps = mApp.getPersistentState(); - for (auto let : xdr::xdr_traits::enum_values()) - { - ps.clearRebuildForType(static_cast(let)); - } + ps.clearRebuildForOfferTable(); } } else if (mTransactionsVerifyApplySeq) diff --git a/src/catchup/CatchupWork.h b/src/catchup/CatchupWork.h index ed36c75f5c..d650bbc910 100644 --- a/src/catchup/CatchupWork.h +++ b/src/catchup/CatchupWork.h @@ -47,7 +47,7 @@ class CatchupWork : public Work protected: HistoryArchiveState mLocalState; std::unique_ptr mDownloadDir; - std::map> mBuckets; + std::map> mBuckets; void doReset() override; BasicWork::State doWork() override; @@ -65,7 +65,7 @@ class CatchupWork : public Work static uint32_t const PUBLISH_QUEUE_MAX_SIZE; CatchupWork(Application& app, CatchupConfiguration catchupConfiguration, - std::set> bucketsToRetain, + std::set> bucketsToRetain, std::shared_ptr archive = nullptr); virtual ~CatchupWork(); std::string getStatus() const override; @@ -128,6 +128,6 @@ class CatchupWork : public Work std::optional mHAS; std::optional mBucketHAS; - std::set> mRetainedBuckets; + std::set> mRetainedBuckets; }; } diff --git a/src/catchup/DownloadApplyTxsWork.cpp b/src/catchup/DownloadApplyTxsWork.cpp index 1746060d69..b91a9f7f9f 100644 --- a/src/catchup/DownloadApplyTxsWork.cpp +++ b/src/catchup/DownloadApplyTxsWork.cpp @@ -83,7 +83,7 @@ DownloadApplyTxsWork::yieldMoreWork() auto maybeWaitForMerges = [](Application& app) { if (app.getConfig().CATCHUP_WAIT_MERGES_TX_APPLY_FOR_TESTING) { - auto& bl = app.getBucketManager().getBucketList(); + auto& bl = app.getBucketManager().getLiveBucketList(); bl.resolveAnyReadyFutures(); return bl.futuresAllResolved(); } diff --git a/src/catchup/IndexBucketsWork.cpp b/src/catchup/IndexBucketsWork.cpp index 5019b48757..32eb6598b3 100644 --- a/src/catchup/IndexBucketsWork.cpp +++ b/src/catchup/IndexBucketsWork.cpp @@ -15,7 +15,7 @@ namespace stellar { IndexBucketsWork::IndexWork::IndexWork(Application& app, - std::shared_ptr b) + std::shared_ptr b) : BasicWork(app, "index-work", BasicWork::RETRY_NEVER), mBucket(b) { } @@ -57,7 +57,7 @@ IndexBucketsWork::IndexWork::postWork() auto indexFilename = bm.bucketIndexFilename(self->mBucket->getHash()); - if (bm.getConfig().isPersistingBucketListDBIndexes() && + if (bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX && fs::exists(indexFilename)) { self->mIndex = BucketIndex::load(bm, indexFilename, @@ -80,7 +80,8 @@ IndexBucketsWork::IndexWork::postWork() if (!self->mIndex) { - self->mIndex = BucketIndex::createIndex( + // TODO: Fix this when archive BucketLists assume state + self->mIndex = BucketIndex::createIndex( bm, self->mBucket->getFilename(), self->mBucket->getHash()); } @@ -104,7 +105,7 @@ IndexBucketsWork::IndexWork::postWork() } IndexBucketsWork::IndexBucketsWork( - Application& app, std::vector> const& buckets) + Application& app, std::vector> const& buckets) : Work(app, "index-bucketList", BasicWork::RETRY_NEVER), mBuckets(buckets) { } @@ -130,7 +131,7 @@ void IndexBucketsWork::spawnWork() { UnorderedSet indexedBuckets; - auto spawnIndexWork = [&](std::shared_ptr const& b) { + auto spawnIndexWork = [&](std::shared_ptr const& b) { // Don't index empty bucket or buckets that are already being // indexed. Sometimes one level's snap bucket may be another // level's future bucket. 
        // level's future bucket. The indexing job may have started but
diff --git a/src/catchup/IndexBucketsWork.h b/src/catchup/IndexBucketsWork.h
index 65a0f0e18a..ed44289c4e 100644
--- a/src/catchup/IndexBucketsWork.h
+++ b/src/catchup/IndexBucketsWork.h
@@ -13,33 +13,34 @@ namespace stellar
 class Bucket;
 class BucketIndex;
 class BucketManager;
+class LiveBucket;

 class IndexBucketsWork : public Work
 {
     class IndexWork : public BasicWork
     {
-        std::shared_ptr<Bucket> mBucket;
+        std::shared_ptr<LiveBucket> mBucket;
         std::unique_ptr<BucketIndex const> mIndex;
         bool mDone{false};

         void postWork();

       public:
-        IndexWork(Application& app, std::shared_ptr<Bucket> b);
+        IndexWork(Application& app, std::shared_ptr<LiveBucket> b);

       protected:
         State onRun() override;
         bool onAbort() override;
     };

-    std::vector<std::shared_ptr<Bucket>> const& mBuckets;
+    std::vector<std::shared_ptr<LiveBucket>> const& mBuckets;
     bool mWorkSpawned{false};
     void spawnWork();

   public:
     IndexBucketsWork(Application& app,
-                     std::vector<std::shared_ptr<Bucket>> const& buckets);
+                     std::vector<std::shared_ptr<LiveBucket>> const& buckets);

   protected:
     State doWork() override;
diff --git a/src/database/Database.cpp b/src/database/Database.cpp
index e06f1ff016..433df01bff 100644
--- a/src/database/Database.cpp
+++ b/src/database/Database.cpp
@@ -248,13 +248,9 @@ Database::upgradeToCurrentSchema()
         putSchemaVersion(vers);
     }

-    // While not really a schema upgrade, we need to upgrade the DB when
-    // BucketListDB is enabled.
-    if (mApp.getConfig().isUsingBucketListDB())
-    {
-        // Tx meta column no longer supported in BucketListDB
-        dropTxMetaIfExists();
-    }
+    // Tx meta column no longer supported
+    dropTxMetaIfExists();
+    maybeUpgradeToBucketListDB();

     CLOG_INFO(Database, "DB schema is in current version");
     releaseAssert(vers == SCHEMA_VERSION);
@@ -294,6 +290,50 @@ Database::dropTxMetaIfExists()
     }
 }

+void
+Database::maybeUpgradeToBucketListDB()
+{
+    if (mApp.getPersistentState().getState(PersistentState::kDBBackend) !=
+        BucketIndex::DB_BACKEND_STATE)
+    {
+        CLOG_INFO(Database, "Upgrading to BucketListDB");
+
+        // Drop all LedgerEntry tables except for offers
+        CLOG_INFO(Database, "Dropping table accounts");
+        getSession() << "DROP TABLE IF EXISTS accounts;";
+
+        CLOG_INFO(Database, "Dropping table signers");
+        getSession() << "DROP TABLE IF EXISTS signers;";
+
+        CLOG_INFO(Database, "Dropping table claimablebalance");
+        getSession() << "DROP TABLE IF EXISTS claimablebalance;";
+
+        CLOG_INFO(Database, "Dropping table configsettings");
+        getSession() << "DROP TABLE IF EXISTS configsettings;";
+
+        CLOG_INFO(Database, "Dropping table contractcode");
+        getSession() << "DROP TABLE IF EXISTS contractcode;";
+
+        CLOG_INFO(Database, "Dropping table contractdata");
+        getSession() << "DROP TABLE IF EXISTS contractdata;";
+
+        CLOG_INFO(Database, "Dropping table accountdata");
+        getSession() << "DROP TABLE IF EXISTS accountdata;";
+
+        CLOG_INFO(Database, "Dropping table liquiditypool");
+        getSession() << "DROP TABLE IF EXISTS liquiditypool;";
+
+        CLOG_INFO(Database, "Dropping table trustlines");
+        getSession() << "DROP TABLE IF EXISTS trustlines;";
+
+        CLOG_INFO(Database, "Dropping table ttl");
+        getSession() << "DROP TABLE IF EXISTS ttl;";
+
+        mApp.getPersistentState().setState(PersistentState::kDBBackend,
+                                           BucketIndex::DB_BACKEND_STATE);
+    }
+}
+
 void
 Database::putSchemaVersion(unsigned long vers)
 {
diff --git a/src/database/Database.h b/src/database/Database.h
index e3ad43b214..73540c2884 100644
--- a/src/database/Database.h
+++ b/src/database/Database.h
@@ -174,6 +174,7 @@ class Database : NonMovableOrCopyable

     void upgradeToCurrentSchema();
     void dropTxMetaIfExists();
+    void maybeUpgradeToBucketListDB();

     // Access the underlying SOCI session object
     soci::session& getSession();
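
The upgrade above is one-way: once the legacy ledger-entry tables are dropped and the `kDBBackend` flag is persisted, the node serves all non-offer state from the BucketList. Below is a minimal sketch of a post-upgrade sanity check; the helper name `checkBucketListDBUpgrade` and the SQLite catalog query are illustrative assumptions, not part of this patch.

```cpp
#include "bucket/BucketIndex.h"
#include "database/Database.h"
#include "main/Application.h"
#include "main/PersistentState.h"
#include "util/GlobalChecks.h"

// Hypothetical helper: verify the BucketListDB migration completed.
void
checkBucketListDBUpgrade(stellar::Application& app)
{
    int count = 0;
    // SQLite-specific catalog query; a Postgres deployment would consult
    // information_schema instead.
    app.getDatabase().getSession()
        << "SELECT COUNT(*) FROM sqlite_master WHERE type = 'table' "
           "AND name = 'accounts';",
        soci::into(count);
    releaseAssert(count == 0); // legacy table must be gone

    // The persistent flag written by maybeUpgradeToBucketListDB() above.
    releaseAssert(app.getPersistentState().getState(
                      stellar::PersistentState::kDBBackend) ==
                  stellar::BucketIndex::DB_BACKEND_STATE);
}
```
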
diff --git a/src/database/test/DatabaseTests.cpp b/src/database/test/DatabaseTests.cpp
index 4a17cd565c..c2fc838bd3 100644
--- a/src/database/test/DatabaseTests.cpp
+++ b/src/database/test/DatabaseTests.cpp
@@ -72,7 +72,7 @@ transactionTest(Application::pointer app)

 TEST_CASE("database smoketest", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);

     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg, true, false);
@@ -81,7 +81,7 @@ TEST_CASE("database smoketest", "[db]")

 TEST_CASE("database on-disk smoketest", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_BUCKET_DB_PERSISTENT);

     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg, true, false);
@@ -201,7 +201,7 @@ checkMVCCIsolation(Application::pointer app)

 TEST_CASE("sqlite MVCC test", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_BUCKET_DB_PERSISTENT);
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg, true, false);
     checkMVCCIsolation(app);
@@ -349,7 +349,7 @@ TEST_CASE("postgres performance", "[db][pgperf][!hide]")

 TEST_CASE("schema test", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);

     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg);
diff --git a/src/herder/test/HerderTests.cpp b/src/herder/test/HerderTests.cpp
index 9366adb7c3..0b6dd1d419 100644
--- a/src/herder/test/HerderTests.cpp
+++ b/src/herder/test/HerderTests.cpp
@@ -1135,7 +1135,7 @@ TEST_CASE("surge pricing", "[herder][txset][soroban]")
 {
     SECTION("max 0 ops per ledger")
     {
-        Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
         cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 0;

         VirtualClock clock;
@@ -2564,11 +2564,6 @@ TEST_CASE("SCP State", "[herder]")
     };

     auto doTest = [&](bool forceSCP) {
-        SECTION("sqlite")
-        {
-            configure(Config::TestDbMode::TESTDB_ON_DISK_SQLITE);
-        }
-
         SECTION("bucketlistDB")
         {
             configure(Config::TestDbMode::TESTDB_BUCKET_DB_PERSISTENT);
@@ -3258,7 +3253,7 @@ TEST_CASE("accept soroban txs after network upgrade", "[soroban][herder]")
     auto simulation =
         Topologies::core(4, 1, Simulation::OVER_LOOPBACK, networkID, [](int i) {
-            auto cfg = getTestConfig(i, Config::TESTDB_ON_DISK_SQLITE);
+            auto cfg = getTestConfig(i, Config::TESTDB_IN_MEMORY);
             cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 100;
             cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION =
                 static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION) - 1;
@@ -3687,7 +3682,7 @@ herderExternalizesValuesWithProtocol(uint32_t version)
     auto networkID = sha256(getTestConfig().NETWORK_PASSPHRASE);
     auto simulation = std::make_shared<Simulation>(
         Simulation::OVER_LOOPBACK, networkID, [version](int i) {
-            auto cfg = getTestConfig(i, Config::TESTDB_ON_DISK_SQLITE);
+            auto cfg = getTestConfig(i, Config::TESTDB_BUCKET_DB_PERSISTENT);
             cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = version;
             return cfg;
         });
diff --git a/src/herder/test/UpgradesTests.cpp b/src/herder/test/UpgradesTests.cpp
index 3a1b3adf56..59635fcead 100644
--- a/src/herder/test/UpgradesTests.cpp
+++ b/src/herder/test/UpgradesTests.cpp
@@ -374,7 +374,7 @@ void
 testValidateUpgrades(VirtualClock::system_time_point preferredUpgradeDatetime,
                      bool canBeValid)
 {
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = 10;
     cfg.TESTING_UPGRADE_DESIRED_FEE = 100;
     cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 50;
@@ -632,7 +632,7 @@ TEST_CASE("Ledger Manager applies upgrades properly", "[upgrades]")
 TEST_CASE("config upgrade validation", "[upgrades]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     auto app = createTestApplication(clock, cfg);

     auto headerTime = VirtualClock::to_time_t(genesis(0, 2));
@@ -828,7 +828,7 @@ TEST_CASE("config upgrade validation", "[upgrades]")
 TEST_CASE("config upgrades applied to ledger", "[soroban][upgrades]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION =
         static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION) - 1;
     cfg.USE_CONFIG_FOR_GENESIS = false;
@@ -1984,7 +1984,7 @@ TEST_CASE("upgrade to version 11", "[upgrades]")
                          app->getConfig().NODE_SEED);
         lm.closeLedger(LedgerCloseData(ledgerSeq, txSet, sv));
         auto& bm = app->getBucketManager();
-        auto& bl = bm.getBucketList();
+        auto& bl = bm.getLiveBucketList();
         while (!bl.futuresAllResolved())
         {
             std::this_thread::sleep_for(std::chrono::milliseconds(10));
@@ -1998,16 +1998,17 @@ TEST_CASE("upgrade to version 11", "[upgrades]")
             ledgerSeq, mc.mPreInitEntryProtocolMerges,
             mc.mPostInitEntryProtocolMerges, mc.mNewInitEntries,
             mc.mOldInitEntries);
-        for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+        for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
         {
-            auto& lev = bm.getBucketList().getLevel(level);
+            auto& lev = bm.getLiveBucketList().getLevel(level);
             BucketTestUtils::EntryCounts currCounts(lev.getCurr());
             BucketTestUtils::EntryCounts snapCounts(lev.getSnap());
             CLOG_INFO(
                 Bucket,
                 "post-ledger {} close, init counts: level {}, {} in curr, "
                 "{} in snap",
-                ledgerSeq, level, currCounts.nInit, snapCounts.nInit);
+                ledgerSeq, level, currCounts.nInitOrArchived,
+                snapCounts.nInitOrArchived);
         }
         if (ledgerSeq < 5)
         {
@@ -2030,8 +2031,8 @@ TEST_CASE("upgrade to version 11", "[upgrades]")
             // - From 8 on, the INITENTRYs propagate to lev[1].curr
             REQUIRE(mc.mPreInitEntryProtocolMerges == 5);
             REQUIRE(mc.mPostInitEntryProtocolMerges != 0);
-            auto& lev0 = bm.getBucketList().getLevel(0);
-            auto& lev1 = bm.getBucketList().getLevel(1);
+            auto& lev0 = bm.getLiveBucketList().getLevel(0);
+            auto& lev1 = bm.getLiveBucketList().getLevel(1);
             auto lev0Curr = lev0.getCurr();
             auto lev0Snap = lev0.getSnap();
             auto lev1Curr = lev1.getCurr();
@@ -2039,22 +2040,22 @@ TEST_CASE("upgrade to version 11", "[upgrades]")
             BucketTestUtils::EntryCounts lev0CurrCounts(lev0Curr);
             BucketTestUtils::EntryCounts lev0SnapCounts(lev0Snap);
             BucketTestUtils::EntryCounts lev1CurrCounts(lev1Curr);
-            auto getVers = [](std::shared_ptr<Bucket> b) -> uint32_t {
-                return BucketInputIterator(b).getMetadata().ledgerVersion;
+            auto getVers = [](std::shared_ptr<LiveBucket> b) -> uint32_t {
+                return LiveBucketInputIterator(b).getMetadata().ledgerVersion;
             };
             switch (ledgerSeq)
             {
             default:
             case 8:
                 REQUIRE(getVers(lev1Curr) == newProto);
-                REQUIRE(lev1CurrCounts.nInit != 0);
+                REQUIRE(lev1CurrCounts.nInitOrArchived != 0);
             case 7:
             case 6:
                 REQUIRE(getVers(lev0Snap) == newProto);
-                REQUIRE(lev0SnapCounts.nInit != 0);
+                REQUIRE(lev0SnapCounts.nInitOrArchived != 0);
             case 5:
                 REQUIRE(getVers(lev0Curr) == newProto);
-                REQUIRE(lev0CurrCounts.nInit != 0);
+                REQUIRE(lev0CurrCounts.nInitOrArchived != 0);
             }
         }
     }
@@ -2108,7 +2109,7 @@ TEST_CASE("upgrade to version 12", "[upgrades]")
                          app->getConfig().NODE_SEED);
         lm.closeLedger(LedgerCloseData(ledgerSeq, txSet, sv));
         auto& bm = app->getBucketManager();
-        auto& bl = bm.getBucketList();
+        auto& bl = bm.getLiveBucketList();
         while (!bl.futuresAllResolved())
         {
             std::this_thread::sleep_for(std::chrono::milliseconds(10));
@@ -2122,14 +2123,14 @@ TEST_CASE("upgrade to version 12", "[upgrades]")
         }
         else
         {
-            auto& lev0 = bm.getBucketList().getLevel(0);
-            auto& lev1 = bm.getBucketList().getLevel(1);
+            auto& lev0 = bm.getLiveBucketList().getLevel(0);
+            auto& lev1 = bm.getLiveBucketList().getLevel(1);
             auto lev0Curr = lev0.getCurr();
             auto lev0Snap = lev0.getSnap();
             auto lev1Curr = lev1.getCurr();
             auto lev1Snap = lev1.getSnap();
-            auto getVers = [](std::shared_ptr<Bucket> b) -> uint32_t {
-                return BucketInputIterator(b).getMetadata().ledgerVersion;
+            auto getVers = [](std::shared_ptr<LiveBucket> b) -> uint32_t {
+                return LiveBucketInputIterator(b).getMetadata().ledgerVersion;
             };
             switch (ledgerSeq)
             {
@@ -2233,7 +2234,7 @@ TEST_CASE("configuration initialized in version upgrade", "[upgrades]")
         REQUIRE(!ltx.load(getMaxContractSizeKey()));
     }

-    auto blSize = app->getBucketManager().getBucketList().getSize();
+    auto blSize = app->getBucketManager().getLiveBucketList().getSize();
     executeUpgrade(*app, makeProtocolVersionUpgrade(
                              static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION)));
@@ -2275,7 +2276,7 @@ TEST_CASE_VERSIONS("upgrade base reserve", "[upgrades]")
 {
     VirtualClock clock;

-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     auto app = createTestApplication(clock, cfg);

     auto& lm = app->getLedgerManager();
@@ -2974,7 +2975,7 @@ TEST_CASE("upgrade from cpp14 serialized data", "[upgrades]")

 TEST_CASE("upgrades serialization roundtrip", "[upgrades]")
 {
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
@@ -3058,7 +3059,7 @@ TEST_CASE("upgrades serialization roundtrip", "[upgrades]")
 TEST_CASE_VERSIONS("upgrade flags", "[upgrades][liquiditypool]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);

     auto app = createTestApplication(clock, cfg);
diff --git a/src/history/FileTransferInfo.h b/src/history/FileTransferInfo.h
index 1e66b3f8ab..daeebddcc7 100644
--- a/src/history/FileTransferInfo.h
+++ b/src/history/FileTransferInfo.h
@@ -28,7 +28,7 @@ class FileTransferInfo
     std::string getLocalDir(TmpDir const& localRoot) const;

   public:
-    FileTransferInfo(Bucket const& bucket)
+    FileTransferInfo(LiveBucket const& bucket)
         : mType(HISTORY_FILE_TYPE_BUCKET)
         , mHexDigits(binToHex(bucket.getHash()))
         , mLocalPath(bucket.getFilename().string())
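
Throughout these tests the protocol version recorded in a bucket is read from its METAENTRY, as the `getVers` lambdas above do. A standalone restatement of that pattern (the free-function wrapper is illustrative; the iterator call mirrors the diff):

```cpp
#include "bucket/BucketInputIterator.h"

// Read the ledger protocol version a live bucket was written under.
// An empty bucket has no METAENTRY, so callers should check isEmpty()
// first, as HistoryArchiveState::containsValidBuckets does below.
uint32_t
bucketProtocolVersion(std::shared_ptr<stellar::LiveBucket> const& b)
{
    return stellar::LiveBucketInputIterator(b).getMetadata().ledgerVersion;
}
```
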
diff --git a/src/history/HistoryArchive.cpp b/src/history/HistoryArchive.cpp
index a627b98c81..a2f8992547 100644
--- a/src/history/HistoryArchive.cpp
+++ b/src/history/HistoryArchive.cpp
@@ -246,7 +246,7 @@ HistoryArchiveState::differingBuckets(HistoryArchiveState const& other) const
         inhibit.insert(b.snap);
     }
     std::vector<std::string> ret;
-    for (size_t i = BucketList::kNumLevels; i != 0; --i)
+    for (size_t i = LiveBucketList::kNumLevels; i != 0; --i)
     {
         auto s = currentBuckets[i - 1].snap;
         auto n = s;
@@ -307,12 +307,12 @@ HistoryArchiveState::containsValidBuckets(Application& app) const
     // Process bucket, return version
     auto processBucket = [&](std::string const& bucketHash) {
         auto bucket =
-            app.getBucketManager().getBucketByHash(hexToBin256(bucketHash));
+            app.getBucketManager().getLiveBucketByHash(hexToBin256(bucketHash));
         releaseAssert(bucket);
         int32_t version = 0;
         if (!bucket->isEmpty())
         {
-            version = Bucket::getBucketVersion(bucket);
+            version = bucket->getBucketVersion();
             if (!nonEmptySeen)
             {
                 nonEmptySeen = true;
@@ -322,7 +322,7 @@ HistoryArchiveState::containsValidBuckets(Application& app) const
     };

     // Iterate bottom-up, from oldest to newest buckets
-    for (uint32_t j = BucketList::kNumLevels; j != 0; --j)
+    for (uint32_t j = LiveBucketList::kNumLevels; j != 0; --j)
     {
         auto i = j - 1;
         auto const& level = currentBuckets[i];
@@ -358,7 +358,8 @@ HistoryArchiveState::containsValidBuckets(Application& app) const
             continue;
         }
         else if (protocolVersionStartsFrom(
-                     prevSnapVersion, Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+                     prevSnapVersion,
+                     LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
         {
             if (!level.next.isClear())
             {
@@ -384,16 +385,17 @@ HistoryArchiveState::prepareForPublish(Application& app)
     // Level 0 future buckets are always clear
     releaseAssert(currentBuckets[0].next.isClear());

-    for (uint32_t i = 1; i < BucketList::kNumLevels; i++)
+    for (uint32_t i = 1; i < LiveBucketList::kNumLevels; i++)
     {
         auto& level = currentBuckets[i];
         auto& prev = currentBuckets[i - 1];

         auto snap =
-            app.getBucketManager().getBucketByHash(hexToBin256(prev.snap));
+            app.getBucketManager().getLiveBucketByHash(hexToBin256(prev.snap));
         if (!level.next.isClear() &&
-            protocolVersionStartsFrom(Bucket::getBucketVersion(snap),
-                                      Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+            protocolVersionStartsFrom(
+                snap->getBucketVersion(),
+                LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
         {
             level.next.clear();
         }
@@ -423,20 +425,20 @@ HistoryArchiveState::HistoryArchiveState() : server(STELLAR_CORE_VERSION)
     HistoryStateBucket b;
     b.curr = s;
     b.snap = s;
-    while (currentBuckets.size() < BucketList::kNumLevels)
+    while (currentBuckets.size() < LiveBucketList::kNumLevels)
     {
         currentBuckets.push_back(b);
     }
 }

 HistoryArchiveState::HistoryArchiveState(uint32_t ledgerSeq,
-                                         BucketList const& buckets,
+                                         LiveBucketList const& buckets,
                                          std::string const& passphrase)
     : server(STELLAR_CORE_VERSION)
     , networkPassphrase(passphrase)
     , currentLedger(ledgerSeq)
 {
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
     {
         HistoryStateBucket b;
         auto& level = buckets.getLevel(i);
diff --git a/src/history/HistoryArchive.h b/src/history/HistoryArchive.h
index 1a70622dac..378716118d 100644
--- a/src/history/HistoryArchive.h
+++ b/src/history/HistoryArchive.h
@@ -27,13 +27,15 @@ namespace stellar
 {

 class Application;
-class BucketList;
+class LiveBucketList;
 class Bucket;

 struct HistoryStateBucket
 {
     std::string curr;
-    FutureBucket next;
+
+    // TODO: Add archival buckets to history
+    FutureBucket<LiveBucket> next;
     std::string snap;

     template <class Archive>
@@ -70,7 +72,7 @@ struct HistoryArchiveState

     HistoryArchiveState();

-    HistoryArchiveState(uint32_t ledgerSeq, BucketList const& buckets,
+    HistoryArchiveState(uint32_t ledgerSeq, LiveBucketList const& buckets,
                         std::string const& networkPassphrase);

     static std::string baseName();
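
The rewritten constructor is what publishing paths use to snapshot the current bucket list into a HAS, consistent with `HistoryManagerImpl::queueCurrentHistory` in the next file's hunk. A minimal usage sketch (the wrapper name `makeHAS` is illustrative):

```cpp
// Illustrative wrapper: capture the current live bucket list as a
// HistoryArchiveState for ledger `ledgerSeq`.
stellar::HistoryArchiveState
makeHAS(stellar::Application& app, uint32_t ledgerSeq)
{
    auto& bl = app.getBucketManager().getLiveBucketList();
    return stellar::HistoryArchiveState(ledgerSeq, bl,
                                        app.getConfig().NETWORK_PASSPHRASE);
}
```
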
diff --git a/src/history/HistoryManager.h b/src/history/HistoryManager.h
index d69f1c1bd8..732b1e1795 100644
--- a/src/history/HistoryManager.h
+++ b/src/history/HistoryManager.h
@@ -180,7 +180,7 @@ namespace stellar
 {
 class Application;
 class Bucket;
-class BucketList;
+class LiveBucketList;
 class Config;
 class Database;
 class HistoryArchive;
diff --git a/src/history/HistoryManagerImpl.cpp b/src/history/HistoryManagerImpl.cpp
index 6eb8a257a3..57584aea8e 100644
--- a/src/history/HistoryManagerImpl.cpp
+++ b/src/history/HistoryManagerImpl.cpp
@@ -221,10 +221,10 @@ HistoryManagerImpl::queueCurrentHistory()
     ZoneScoped;
     auto ledger = mApp.getLedgerManager().getLastClosedLedgerNum();

-    BucketList bl;
+    LiveBucketList bl;
     if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
     {
-        bl = mApp.getBucketManager().getBucketList();
+        bl = mApp.getBucketManager().getLiveBucketList();
     }

     HistoryArchiveState has(ledger, bl, mApp.getConfig().NETWORK_PASSPHRASE);
diff --git a/src/history/StateSnapshot.cpp b/src/history/StateSnapshot.cpp
index 6c03902a2c..d8de6ddd51 100644
--- a/src/history/StateSnapshot.cpp
+++ b/src/history/StateSnapshot.cpp
@@ -41,7 +41,7 @@ StateSnapshot::StateSnapshot(Application& app, HistoryArchiveState const& state)
           mSnapDir, HISTORY_FILE_TYPE_SCP, mLocalState.currentLedger))
 {
-    if (mLocalState.currentBuckets.size() != BucketList::kNumLevels)
+    if (mLocalState.currentBuckets.size() != LiveBucketList::kNumLevels)
     {
         throw std::runtime_error("Invalid HAS: malformed bucketlist");
     }
@@ -147,7 +147,7 @@ StateSnapshot::differingHASFiles(HistoryArchiveState const& other)

     for (auto const& hash : mLocalState.differingBuckets(other))
     {
-        auto b = mApp.getBucketManager().getBucketByHash(hexToBin256(hash));
+        auto b = mApp.getBucketManager().getLiveBucketByHash(hexToBin256(hash));
         releaseAssert(b);
         addIfExists(std::make_shared<FileTransferInfo>(*b));
     }
diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp
index 2b882a9099..244270b7d5 100644
--- a/src/history/test/HistoryTests.cpp
+++ b/src/history/test/HistoryTests.cpp
@@ -146,7 +146,7 @@ TEST_CASE("History bucket verification", "[history][catchup]")
                              cg->getArchiveDirName())};
     std::vector<std::string> hashes;
     auto& wm = app->getWorkScheduler();
-    std::map<std::string, std::shared_ptr<Bucket>> mBuckets;
+    std::map<std::string, std::shared_ptr<LiveBucket>> mBuckets;
     auto tmpDir =
         std::make_unique<TmpDir>(app->getTmpDirManager().tmpDir("bucket-test"));
@@ -535,7 +535,7 @@ TEST_CASE("Publish works correctly post shadow removal", "[history]")
     // Perform publish: 2 checkpoints (or 127 ledgers) correspond to 3
     // levels being initialized and partially filled in the bucketlist
     sim.setUpgradeLedger(upgradeLedger,
-                         Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED);
+                         LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED);
     auto checkpointLedger = sim.getLastCheckpointLedger(2);
     auto maxLevelTouched = 3;
     sim.ensureOfflineCatchupPossible(checkpointLedger);
@@ -554,7 +554,7 @@ TEST_CASE("Publish works correctly post shadow removal", "[history]")
                                        configurator};

     uint32_t oldProto =
-        static_cast<uint32_t>(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED) - 1;
+        static_cast<uint32_t>(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) - 1;
     catchupSimulation.generateRandomLedger(oldProto);

     // The next sections reflect how future buckets in HAS change, depending on
@@ -613,10 +613,8 @@ dbModeName(Config::TestDbMode mode)
 {
     switch (mode)
     {
-    case Config::TESTDB_IN_MEMORY_OFFERS:
-        return "TESTDB_IN_MEMORY_OFFERS";
-    case Config::TESTDB_ON_DISK_SQLITE:
-        return "TESTDB_ON_DISK_SQLITE";
+    case Config::TESTDB_IN_MEMORY:
+        return "TESTDB_IN_MEMORY";
 #ifdef USE_POSTGRES
     case Config::TESTDB_POSTGRESQL:
         return "TESTDB_POSTGRESQL";
@@ -749,7 +747,7 @@ TEST_CASE("History catchup with different modes",
         60};

     std::vector<Config::TestDbMode> dbModes = {
-        Config::TESTDB_ON_DISK_SQLITE, Config::TESTDB_BUCKET_DB_PERSISTENT};
+        Config::TESTDB_BUCKET_DB_PERSISTENT};
 #ifdef USE_POSTGRES
     if (!force_sqlite)
         dbModes.push_back(Config::TESTDB_POSTGRESQL);
@@ -1031,7 +1029,7 @@ TEST_CASE("Catchup non-initentry buckets to initentry-supporting works",
           "[history][bucket][acceptance]")
 {
     uint32_t newProto = static_cast<uint32_t>(
-        Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
+        LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
     uint32_t oldProto = newProto - 1;
     auto configurator =
         std::make_shared<TmpDirHistoryConfigurator>();
@@ -1205,14 +1203,14 @@ TEST_CASE_VERSIONS(
     Application::pointer app = createTestApplication(clock, cfg);
     auto& hm = app->getHistoryManager();
     auto& lm = app->getLedgerManager();
-    auto& bl = app->getBucketManager().getBucketList();
+    auto& bl = app->getBucketManager().getLiveBucketList();

     while (hm.getPublishQueueCount() != 1)
     {
         auto lcl = lm.getLastClosedLedgerHeader();
         lcl.header.ledgerSeq += 1;
-        BucketTestUtils::addBatchAndUpdateSnapshot(
-            bl, *app, lcl.header, {},
+        BucketTestUtils::addLiveBatchAndUpdateSnapshot(
+            *app, lcl.header, {},
             LedgerTestUtils::generateValidUniqueLedgerEntries(8), {});
         clock.crank(true);
     }
@@ -1230,7 +1228,7 @@ TEST_CASE_VERSIONS(
     // Second, ensure `next` is in the exact same state as when it was
     // queued
-    for (uint32_t i = 0; i < BucketList::kNumLevels; i++)
+    for (uint32_t i = 0; i < LiveBucketList::kNumLevels; i++)
     {
         auto const& currentNext = bl.getLevel(i).getNext();
         auto const& queuedNext = queuedHAS.currentBuckets[i].next;
diff --git a/src/history/test/HistoryTestsUtils.cpp b/src/history/test/HistoryTestsUtils.cpp
index 5119d372a4..ab47d2973e 100644
--- a/src/history/test/HistoryTestsUtils.cpp
+++ b/src/history/test/HistoryTestsUtils.cpp
@@ -139,7 +139,7 @@ BucketOutputIteratorForTesting::writeTmpTestBucket()
     auto ledgerEntries =
         LedgerTestUtils::generateValidUniqueLedgerEntries(NUM_ITEMS_PER_BUCKET);
     auto bucketEntries =
-        Bucket::convertToBucketEntry(false, {}, ledgerEntries, {});
+        LiveBucket::convertToBucketEntry(false, {}, ledgerEntries, {});
     for (auto const& bucketEntry : bucketEntries)
     {
         put(bucketEntry);
@@ -543,12 +543,12 @@ CatchupSimulation::generateRandomLedger(uint32_t version)
         mLedgerHashes.push_back(lclh.hash);
         mBucketListHashes.push_back(lclh.header.bucketListHash);
         mBucket0Hashes.push_back(mApp.getBucketManager()
-                                     .getBucketList()
+                                     .getLiveBucketList()
                                      .getLevel(0)
                                      .getCurr()
                                      ->getHash());
         mBucket1Hashes.push_back(mApp.getBucketManager()
-                                     .getBucketList()
+                                     .getLiveBucketList()
                                      .getLevel(2)
                                      .getCurr()
                                      ->getHash());
@@ -600,7 +600,7 @@ CatchupSimulation::ensureLedgerAvailable(uint32_t targetLedger)
             if (hm.publishCheckpointOnLedgerClose(lcl))
             {
                 mBucketListAtLastPublish =
-                    getApp().getBucketManager().getBucketList();
+                    getApp().getBucketManager().getLiveBucketList();
             }
         }
     }
@@ -950,12 +950,12 @@ CatchupSimulation::validateCatchup(Application::pointer app)
     auto haveBucketListHash =
         lm.getLastClosedLedgerHeader().header.bucketListHash;
     auto haveBucket0Hash = app->getBucketManager()
-                               .getBucketList()
+                               .getLiveBucketList()
                                .getLevel(0)
                                .getCurr()
                                ->getHash();
     auto haveBucket1Hash = app->getBucketManager()
-                               .getBucketList()
+                               .getLiveBucketList()
                                .getLevel(2)
                                .getCurr()
                                ->getHash();
@@ -986,8 +986,8 @@ CatchupSimulation::validateCatchup(Application::pointer app)
     CHECK(wantBucketListHash == haveBucketListHash);
     CHECK(wantHash == haveHash);

-    CHECK(app->getBucketManager().getBucketByHash(wantBucket0Hash));
-    CHECK(app->getBucketManager().getBucketByHash(wantBucket1Hash));
+    CHECK(app->getBucketManager().getLiveBucketByHash(wantBucket0Hash));
+    CHECK(app->getBucketManager().getLiveBucketByHash(wantBucket1Hash));

     CHECK(wantBucket0Hash == haveBucket0Hash);
     CHECK(wantBucket1Hash == haveBucket1Hash);
diff --git a/src/history/test/HistoryTestsUtils.h b/src/history/test/HistoryTestsUtils.h
index eace1f3a6e..8c886c152a 100644
--- a/src/history/test/HistoryTestsUtils.h
+++ b/src/history/test/HistoryTestsUtils.h
@@ -98,7 +98,7 @@ class RealGenesisTmpDirHistoryConfigurator : public TmpDirHistoryConfigurator
     Config& configure(Config& cfg, bool writable) const override;
 };

-class BucketOutputIteratorForTesting : public BucketOutputIterator
+class BucketOutputIteratorForTesting : public LiveBucketOutputIterator
 {
     const size_t NUM_ITEMS_PER_BUCKET = 5;
@@ -185,7 +185,7 @@ class CatchupSimulation
     std::vector<Config> mCfgs;
     Application::pointer mAppPtr;
     Application& mApp;
-    BucketList mBucketListAtLastPublish;
+    LiveBucketList mBucketListAtLastPublish;

     std::vector<LedgerCloseData> mLedgerCloseDatas;
diff --git a/src/historywork/DownloadBucketsWork.cpp b/src/historywork/DownloadBucketsWork.cpp
index 2dcea7ba61..2606a695ae 100644
--- a/src/historywork/DownloadBucketsWork.cpp
+++ b/src/historywork/DownloadBucketsWork.cpp
@@ -17,7 +17,8 @@ namespace stellar
 {

 DownloadBucketsWork::DownloadBucketsWork(
-    Application& app, std::map<std::string, std::shared_ptr<Bucket>>& buckets,
+    Application& app,
+    std::map<std::string, std::shared_ptr<LiveBucket>>& buckets,
     std::vector<std::string> hashes, TmpDir const& downloadDir,
     std::shared_ptr<HistoryArchive> archive)
     : BatchWork{app, "download-verify-buckets"}
@@ -94,7 +95,7 @@ DownloadBucketsWork::yieldMoreWork()
             if (self)
             {
                 auto bucketPath = ft.localPath_nogz();
-                auto b = app.getBucketManager().adoptFileAsBucket(
+                auto b = app.getBucketManager().adoptFileAsLiveBucket(
                     bucketPath, hexToBin256(hash),
                     /*mergeKey=*/nullptr,
                     /*index=*/nullptr);
diff --git a/src/historywork/DownloadBucketsWork.h b/src/historywork/DownloadBucketsWork.h
index b55942eeb3..52db6cd968 100644
--- a/src/historywork/DownloadBucketsWork.h
+++ b/src/historywork/DownloadBucketsWork.h
@@ -17,18 +17,18 @@ class HistoryArchive;

 class DownloadBucketsWork : public BatchWork
 {
-    std::map<std::string, std::shared_ptr<Bucket>>& mBuckets;
+    std::map<std::string, std::shared_ptr<LiveBucket>>& mBuckets;
     std::vector<std::string> mHashes;
     std::vector<std::string>::const_iterator mNextBucketIter;
     TmpDir const& mDownloadDir;
     std::shared_ptr<HistoryArchive> mArchive;

   public:
-    DownloadBucketsWork(Application& app,
-                        std::map<std::string, std::shared_ptr<Bucket>>& buckets,
-                        std::vector<std::string> hashes,
-                        TmpDir const& downloadDir,
-                        std::shared_ptr<HistoryArchive> archive = nullptr);
+    DownloadBucketsWork(
+        Application& app,
+        std::map<std::string, std::shared_ptr<LiveBucket>>& buckets,
+        std::vector<std::string> hashes, TmpDir const& downloadDir,
+        std::shared_ptr<HistoryArchive> archive = nullptr);
     ~DownloadBucketsWork() = default;
     std::string getStatus() const override;
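
The rename to `adoptFileAsLiveBucket` keeps the same download-verify-adopt flow. Sketched below under the assumption that a verified file path and its expected hash are already in hand, as in the callback above:

```cpp
// Sketch of the adopt step: hand a verified bucket file over to the
// BucketManager, which moves it into the bucket directory and returns
// a LiveBucket handle keyed by its hash. Error handling omitted.
auto adoptVerifiedBucket = [](stellar::Application& app,
                              std::string const& bucketPath,
                              stellar::Hash const& hash) {
    return app.getBucketManager().adoptFileAsLiveBucket(
        bucketPath, hash, /*mergeKey=*/nullptr, /*index=*/nullptr);
};
```
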
diff --git a/src/invariant/BucketListIsConsistentWithDatabase.cpp b/src/invariant/BucketListIsConsistentWithDatabase.cpp
index e12da7b724..dfd00554c3 100644
--- a/src/invariant/BucketListIsConsistentWithDatabase.cpp
+++ b/src/invariant/BucketListIsConsistentWithDatabase.cpp
@@ -17,6 +17,7 @@
 #include "main/Application.h"
 #include "main/PersistentState.h"
 #include "medida/timer.h"
+#include "util/GlobalChecks.h"
 #include "util/XDRCereal.h"
 #include
 #include
@@ -26,7 +27,9 @@
 namespace stellar
 {

-static std::string
+namespace
+{
+std::string
 checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerEntry const& entry)
 {
     auto fromDb = ltx.loadWithoutRecord(LedgerEntryKey(entry));
@@ -51,7 +54,7 @@ checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerEntry const& entry)
     }
 }

-static std::string
+std::string
 checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerKey const& key)
 {
     auto fromDb = ltx.loadWithoutRecord(key);
@@ -65,6 +68,25 @@ checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerKey const& key)
     return s;
 }

+std::string
+checkDbEntryCounts(Application& app, LedgerRange const& range,
+                   uint64_t expectedOfferCount)
+{
+    std::string msg;
+    auto& ltxRoot = app.getLedgerTxnRoot();
+    uint64_t numInDb = ltxRoot.countOffers(range);
+    if (numInDb != expectedOfferCount)
+    {
+        msg = fmt::format(
+            FMT_STRING("Incorrect OFFER count: Bucket = {:d} Database "
+                       "= {:d}"),
+            expectedOfferCount, numInDb);
+    }
+
+    return msg;
+}
+}
+
 std::shared_ptr<Invariant>
 BucketListIsConsistentWithDatabase::registerInvariant(Application& app)
 {
@@ -84,103 +106,6 @@ BucketListIsConsistentWithDatabase::getName() const
     return "BucketListIsConsistentWithDatabase";
 }

-struct EntryCounts
-{
-    uint64_t mAccounts{0};
-    uint64_t mTrustLines{0};
-    uint64_t mOffers{0};
-    uint64_t mData{0};
-    uint64_t mClaimableBalance{0};
-    uint64_t mLiquidityPool{0};
-    uint64_t mContractData{0};
-    uint64_t mContractCode{0};
-    uint64_t mConfigSettings{0};
-    uint64_t mTTL{0};
-
-    uint64_t
-    totalEntries() const
-    {
-        return mAccounts + mTrustLines + mOffers + mData + mClaimableBalance +
-               mLiquidityPool + mContractData + mConfigSettings + mTTL;
-    }
-
-    void
-    countLiveEntry(LedgerEntry const& e)
-    {
-        switch (e.data.type())
-        {
-        case ACCOUNT:
-            ++mAccounts;
-            break;
-        case TRUSTLINE:
-            ++mTrustLines;
-            break;
-        case OFFER:
-            ++mOffers;
-            break;
-        case DATA:
-            ++mData;
-            break;
-        case CLAIMABLE_BALANCE:
-            ++mClaimableBalance;
-            break;
-        case LIQUIDITY_POOL:
-            ++mLiquidityPool;
-            break;
-        case CONTRACT_DATA:
-            ++mContractData;
-            break;
-        case CONTRACT_CODE:
-            ++mContractCode;
-            break;
-        case CONFIG_SETTING:
-            ++mConfigSettings;
-            break;
-        case TTL:
-            ++mTTL;
-            break;
-        default:
-            throw std::runtime_error(
-                fmt::format(FMT_STRING("unknown ledger entry type: {:d}"),
-                            static_cast<uint32_t>(e.data.type())));
-        }
-    }
-
-    std::string
-    checkDbEntryCounts(Application& app, LedgerRange const& range,
-                       std::function<bool(LedgerEntryType)> entryTypeFilter)
-    {
-        std::string msg;
-        auto check = [&](LedgerEntryType let, uint64_t numInBucket) {
-            if (entryTypeFilter(let))
-            {
-                auto& ltxRoot = app.getLedgerTxnRoot();
-                uint64_t numInDb = ltxRoot.countObjects(let, range);
-                if (numInDb != numInBucket)
-                {
-                    msg = fmt::format(
-                        FMT_STRING("Incorrect {} count: Bucket = {:d} Database "
-                                   "= {:d}"),
-                        xdr::xdr_traits<LedgerEntryType>::enum_name(let),
-                        numInBucket, numInDb);
-                    return false;
-                }
-            }
-            return true;
-        };
-
-        // Uses short-circuiting to make this compact
-        check(ACCOUNT, mAccounts) && check(TRUSTLINE, mTrustLines) &&
-            check(OFFER, mOffers) && check(DATA, mData) &&
-            check(CLAIMABLE_BALANCE, mClaimableBalance) &&
-            check(LIQUIDITY_POOL, mLiquidityPool) &&
-            check(CONTRACT_DATA, mContractData) &&
-            check(CONTRACT_CODE, mContractCode) &&
-            check(CONFIG_SETTING, mConfigSettings) && check(TTL, mTTL);
-        return msg;
-    }
-};
-
 void
 BucketListIsConsistentWithDatabase::checkEntireBucketlist()
 {
@@ -189,29 +114,29 @@ BucketListIsConsistentWithDatabase::checkEntireBucketlist()
     HistoryArchiveState has = lm.getLastClosedLedgerHAS();
     std::map<LedgerKey, LedgerEntry> bucketLedgerMap =
         bm.loadCompleteLedgerState(has);
-    EntryCounts counts;
+    uint64_t offerCount = 0;
     medida::Timer timer(std::chrono::microseconds(1));

     {
         LedgerTxn ltx(mApp.getLedgerTxnRoot());
         for (auto const& pair : bucketLedgerMap)
         {
-            // Don't check entry types in BucketListDB when enabled
-            if (mApp.getConfig().isUsingBucketListDB() &&
-                !BucketIndex::typeNotSupported(pair.first.type()))
+            // Don't check entry types supported by BucketListDB, since they
+            // won't exist in SQL
+            if (!BucketIndex::typeNotSupported(pair.first.type()))
             {
                 continue;
            }

-            counts.countLiveEntry(pair.second);
+            ++offerCount;
             std::string s;
             timer.Time([&]() { s = checkAgainstDatabase(ltx, pair.second); });
             if (!s.empty())
             {
                 throw std::runtime_error(s);
             }
-            auto i = counts.totalEntries();
-            if ((i & 0x7ffff) == 0)
+
+            if ((offerCount & 0x7ffff) == 0)
             {
                 using namespace std::chrono;
                 nanoseconds ns = timer.duration_unit() *
@@ -220,60 +145,37 @@ BucketListIsConsistentWithDatabase::checkEntireBucketlist()
                 CLOG_INFO(Ledger,
                           "Checked bucket-vs-DB consistency for "
                           "{} entries (mean {}/entry)",
-                          i, us);
+                          offerCount, us);
             }
         }
     }

-    // Count functionality does not support in-memory LedgerTxn
-    if (!mApp.getConfig().isInMemoryMode())
-    {
-        auto range = LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ,
-                                            has.currentLedger);
+    auto range = LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ,
+                                        has.currentLedger);

-        // If BucketListDB enabled, only types not supported by BucketListDB
-        // should be in SQL DB
-        std::function<bool(LedgerEntryType)> filter;
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            filter = BucketIndex::typeNotSupported;
-        }
-        else
-        {
-            filter = [](LedgerEntryType) { return true; };
-        }
-
-        auto s = counts.checkDbEntryCounts(mApp, range, filter);
-        if (!s.empty())
-        {
-            throw std::runtime_error(s);
-        }
+    auto s = checkDbEntryCounts(mApp, range, offerCount);
+    if (!s.empty())
+    {
+        throw std::runtime_error(s);
     }

-    if (mApp.getConfig().isUsingBucketListDB() &&
-        mApp.getPersistentState().getState(PersistentState::kDBBackend) !=
-            BucketIndex::DB_BACKEND_STATE)
+    if (mApp.getPersistentState().getState(PersistentState::kDBBackend) !=
+        BucketIndex::DB_BACKEND_STATE)
     {
-        throw std::runtime_error("BucketListDB enabled but BucketListDB flag "
-                                 "not set in PersistentState.");
+        throw std::runtime_error(
+            "Corrupt DB: BucketListDB flag "
Please run new-db or upgrade-db"); } } std::string BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger) { - // If BucketListDB is disabled, we've already enforced the invariant on a - // per-Bucket level - if (!mApp.getConfig().isUsingBucketListDB()) - { - return {}; - } - - EntryCounts counts; + uint64_t offerCount = 0; LedgerKeySet seenKeys; auto perBucketCheck = [&](auto bucket, auto& ltx) { - for (BucketInputIterator iter(bucket); iter; ++iter) + for (LiveBucketInputIterator iter(bucket); iter; ++iter) { auto const& e = *iter; @@ -290,8 +192,7 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger) auto [_, newKey] = seenKeys.emplace(key); if (newKey) { - counts.countLiveEntry(e.liveEntry()); - + ++offerCount; auto s = checkAgainstDatabase(ltx, e.liveEntry()); if (!s.empty()) { @@ -325,9 +226,9 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger) { LedgerTxn ltx(mApp.getLedgerTxnRoot()); - auto& bl = mApp.getBucketManager().getBucketList(); + auto& bl = mApp.getBucketManager().getLiveBucketList(); - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { auto const& level = bl.getLevel(i); for (auto const& bucket : {level.getCurr(), level.getSnap()}) @@ -344,26 +245,25 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger) auto range = LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ, newestLedger); - // SQL only stores offers when BucketListDB is enabled - return counts.checkDbEntryCounts( - mApp, range, [](LedgerEntryType let) { return let == OFFER; }); + return checkDbEntryCounts(mApp, range, offerCount); } std::string BucketListIsConsistentWithDatabase::checkOnBucketApply( - std::shared_ptr bucket, uint32_t oldestLedger, - uint32_t newestLedger, std::function entryTypeFilter) + std::shared_ptr bucket, uint32_t oldestLedger, + uint32_t newestLedger, std::unordered_set const& shadowedKeys) { - EntryCounts counts; + uint64_t offerCount = 0; { LedgerTxn ltx(mApp.getLedgerTxnRoot()); bool hasPreviousEntry = false; BucketEntry previousEntry; - for (BucketInputIterator iter(bucket); iter; ++iter) + for (LiveBucketInputIterator iter(bucket); iter; ++iter) { auto const& e = *iter; - if (hasPreviousEntry && !BucketEntryIdCmp{}(previousEntry, e)) + if (hasPreviousEntry && + !BucketEntryIdCmp{}(previousEntry, e)) { std::string s = "Bucket has out of order entries: "; s += xdrToCerealString(previousEntry, "previous"); @@ -394,28 +294,25 @@ BucketListIsConsistentWithDatabase::checkOnBucketApply( return s; } - if (entryTypeFilter(e.liveEntry().data.type())) + // Don't check DB against keys shadowed by earlier Buckets + if (BucketIndex::typeNotSupported(e.liveEntry().data.type()) && + shadowedKeys.find(LedgerEntryKey(e.liveEntry())) == + shadowedKeys.end()) { - counts.countLiveEntry(e.liveEntry()); - - // BucketListDB is not compatible with per-Bucket database - // consistency checks - if (!mApp.getConfig().isUsingBucketListDB()) + ++offerCount; + auto s = checkAgainstDatabase(ltx, e.liveEntry()); + if (!s.empty()) { - auto s = checkAgainstDatabase(ltx, e.liveEntry()); - if (!s.empty()) - { - return s; - } + return s; } } } - else if (e.type() == DEADENTRY) + else { - // BucketListDB is not compatible with per-Bucket database - // consistency checks - if (entryTypeFilter(e.deadEntry().type()) && - !mApp.getConfig().isUsingBucketListDB()) + // Only check for OFFER keys that are not shadowed by an earlier + // bucket + 
+                if (BucketIndex::typeNotSupported(e.deadEntry().type()) &&
+                    shadowedKeys.find(e.deadEntry()) == shadowedKeys.end())
                 {
                     auto s = checkAgainstDatabase(ltx, e.deadEntry());
                     if (!s.empty())
                     {
@@ -428,13 +325,6 @@ BucketListIsConsistentWithDatabase::checkOnBucketApply(
     }

     auto range = LedgerRange::inclusive(oldestLedger, newestLedger);
-
-    // BucketListDB not compatible with per-Bucket database consistency checks
-    if (!mApp.getConfig().isUsingBucketListDB())
-    {
-        return counts.checkDbEntryCounts(mApp, range, entryTypeFilter);
-    }
-
-    return std::string{};
+    return checkDbEntryCounts(mApp, range, offerCount);
 }
 }
diff --git a/src/invariant/BucketListIsConsistentWithDatabase.h b/src/invariant/BucketListIsConsistentWithDatabase.h
index b98253dbc9..a9bb3003ac 100644
--- a/src/invariant/BucketListIsConsistentWithDatabase.h
+++ b/src/invariant/BucketListIsConsistentWithDatabase.h
@@ -34,9 +34,9 @@ class BucketListIsConsistentWithDatabase : public Invariant
     virtual std::string getName() const override;

     virtual std::string checkOnBucketApply(
-        std::shared_ptr<Bucket> bucket, uint32_t oldestLedger,
+        std::shared_ptr<LiveBucket> bucket, uint32_t oldestLedger,
         uint32_t newestLedger,
-        std::function<bool(LedgerEntryType)> entryTypeFilter) override;
+        std::unordered_set<LedgerKey> const& shadowedKeys) override;

     virtual std::string checkAfterAssumeState(uint32_t newestLedger) override;
diff --git a/src/invariant/Invariant.h b/src/invariant/Invariant.h
index ddb235795d..6a90105477 100644
--- a/src/invariant/Invariant.h
+++ b/src/invariant/Invariant.h
@@ -8,15 +8,17 @@
 #include
 #include
 #include
+#include <unordered_set>

 namespace stellar
 {

-class Bucket;
+class LiveBucket;
 enum LedgerEntryType : std::int32_t;
 struct LedgerTxnDelta;
 struct Operation;
 struct OperationResult;
+struct LedgerKey;

 // NOTE: The checkOn* functions should have a default implementation so that
 // more can be added in the future without requiring changes to all
@@ -43,9 +45,9 @@ class Invariant
     }

     virtual std::string
-    checkOnBucketApply(std::shared_ptr<Bucket> bucket,
+    checkOnBucketApply(std::shared_ptr<LiveBucket> bucket,
                        uint32_t oldestLedger, uint32_t newestLedger,
-                       std::function<bool(LedgerEntryType)> entryTypeFilter)
+                       std::unordered_set<LedgerKey> const& shadowedKeys)
     {
         return std::string{};
     }
diff --git a/src/invariant/InvariantManager.h b/src/invariant/InvariantManager.h
index 361afc150a..61575fcd49 100644
--- a/src/invariant/InvariantManager.h
+++ b/src/invariant/InvariantManager.h
@@ -35,10 +35,12 @@ class InvariantManager

     virtual Json::Value getJsonInfo() = 0;
     virtual std::vector<std::string> getEnabledInvariants() const = 0;
+    virtual bool isBucketApplyInvariantEnabled() const = 0;

-    virtual void checkOnBucketApply(
-        std::shared_ptr<Bucket> bucket, uint32_t ledger, uint32_t level,
-        bool isCurr, std::function<bool(LedgerEntryType)> entryTypeFilter) = 0;
+    virtual void
+    checkOnBucketApply(std::shared_ptr<LiveBucket> bucket,
+                       uint32_t ledger, uint32_t level, bool isCurr,
+                       std::unordered_set<LedgerKey> const& shadowedKeys) = 0;

     virtual void checkAfterAssumeState(uint32_t newestLedger) = 0;
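
The new `shadowedKeys` parameter replaces the per-type filter: because buckets are applied newest-to-oldest, a key already seen in a newer bucket "shadows" any entry for the same key in older buckets, and those stale entries must not be compared against the database. Below is a sketch of how a caller could accumulate the set while walking the bucket list; it is an assumption based on the parameter's use above, not code from this patch, and `bl`, `ledgerSeq`, and `invariantMgr` are assumed to be in scope:

```cpp
std::unordered_set<stellar::LedgerKey> shadowedKeys;
for (uint32_t i = 0; i < stellar::LiveBucketList::kNumLevels; ++i)
{
    auto const& level = bl.getLevel(i);
    for (auto const& bucket : {level.getCurr(), level.getSnap()})
    {
        // Check this bucket first: keys recorded so far come from newer
        // buckets and shadow whatever this bucket says about them.
        invariantMgr.checkOnBucketApply(bucket, ledgerSeq, i,
                                        bucket == level.getCurr(),
                                        shadowedKeys);

        // Then record this bucket's keys so the older buckets below are
        // not checked for entries they no longer own.
        for (stellar::LiveBucketInputIterator iter(bucket); iter; ++iter)
        {
            auto const& e = *iter;
            if (e.type() == stellar::LIVEENTRY ||
                e.type() == stellar::INITENTRY)
            {
                shadowedKeys.insert(LedgerEntryKey(e.liveEntry()));
            }
            else if (e.type() == stellar::DEADENTRY)
            {
                shadowedKeys.insert(e.deadEntry());
            }
        }
    }
}
```
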
diff --git a/src/invariant/InvariantManagerImpl.cpp b/src/invariant/InvariantManagerImpl.cpp
index df0ca6f61a..c0da64a78b 100644
--- a/src/invariant/InvariantManagerImpl.cpp
+++ b/src/invariant/InvariantManagerImpl.cpp
@@ -69,21 +69,30 @@ InvariantManagerImpl::getEnabledInvariants() const
     return res;
 }

+bool
+InvariantManagerImpl::isBucketApplyInvariantEnabled() const
+{
+    return std::any_of(mEnabled.begin(), mEnabled.end(), [](auto const& inv) {
+        return inv->getName() == "BucketListIsConsistentWithDatabase";
+    });
+}
+
 void
 InvariantManagerImpl::checkOnBucketApply(
-    std::shared_ptr<Bucket> bucket, uint32_t ledger, uint32_t level,
-    bool isCurr, std::function<bool(LedgerEntryType)> entryTypeFilter)
+    std::shared_ptr<LiveBucket> bucket, uint32_t ledger, uint32_t level,
+    bool isCurr, std::unordered_set<LedgerKey> const& shadowedKeys)
 {
-    uint32_t oldestLedger = isCurr
-                                ? BucketList::oldestLedgerInCurr(ledger, level)
-                                : BucketList::oldestLedgerInSnap(ledger, level);
-    uint32_t newestLedger = oldestLedger - 1 +
-                            (isCurr ? BucketList::sizeOfCurr(ledger, level)
-                                    : BucketList::sizeOfSnap(ledger, level));
+    uint32_t oldestLedger =
+        isCurr ? LiveBucketList::oldestLedgerInCurr(ledger, level)
+               : LiveBucketList::oldestLedgerInSnap(ledger, level);
+    uint32_t newestLedger =
+        oldestLedger - 1 +
+        (isCurr ? LiveBucketList::sizeOfCurr(ledger, level)
+                : LiveBucketList::sizeOfSnap(ledger, level));
     for (auto invariant : mEnabled)
     {
-        auto result = invariant->checkOnBucketApply(
-            bucket, oldestLedger, newestLedger, entryTypeFilter);
+        auto result = invariant->checkOnBucketApply(bucket, oldestLedger,
+                                                    newestLedger, shadowedKeys);
         if (result.empty())
         {
             continue;
diff --git a/src/invariant/InvariantManagerImpl.h b/src/invariant/InvariantManagerImpl.h
index 5e495bcf3c..fbbb35fee8 100644
--- a/src/invariant/InvariantManagerImpl.h
+++ b/src/invariant/InvariantManagerImpl.h
@@ -36,15 +36,16 @@ class InvariantManagerImpl : public InvariantManager

     virtual Json::Value getJsonInfo() override;
     virtual std::vector<std::string> getEnabledInvariants() const override;
+    bool isBucketApplyInvariantEnabled() const override;

     virtual void checkOnOperationApply(Operation const& operation,
                                        OperationResult const& opres,
                                        LedgerTxnDelta const& ltxDelta) override;

     virtual void checkOnBucketApply(
-        std::shared_ptr<Bucket> bucket, uint32_t ledger, uint32_t level,
-        bool isCurr,
-        std::function<bool(LedgerEntryType)> entryTypeFilter) override;
+        std::shared_ptr<LiveBucket> bucket, uint32_t ledger,
+        uint32_t level, bool isCurr,
+        std::unordered_set<LedgerKey> const& shadowedKeys) override;

     virtual void checkAfterAssumeState(uint32_t newestLedger) override;
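
The oldest/newest computation above delimits, inclusively, the ledgers whose changes can still reside in the given bucket. A worked restatement mirroring that arithmetic (illustrative; `ledger` and `level` are assumed in scope):

```cpp
// For a curr bucket at `level` as of `ledger`, the checked window is the
// inclusive range [oldest, newest], spanning exactly
// sizeOfCurr(ledger, level) ledgers; snap buckets use the *Snap forms.
uint32_t oldest = stellar::LiveBucketList::oldestLedgerInCurr(ledger, level);
uint32_t newest =
    oldest - 1 + stellar::LiveBucketList::sizeOfCurr(ledger, level);
```
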
diff --git a/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp b/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp
index acc308be6b..20892ad29e 100644
--- a/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp
+++ b/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp
@@ -292,7 +292,7 @@ deleteRandomSubEntryFromAccount(Application& app, LedgerEntry& le,
 TEST_CASE("Create account with no subentries",
           "[invariant][accountsubentriescount]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"AccountSubEntriesCountIsValid"};
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg);
@@ -309,7 +309,7 @@ TEST_CASE("Create account then add signers and subentries",
           "[invariant][accountsubentriescount]")
 {
     stellar::uniform_int_distribution<int32_t> changesDist(-1, 2);
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"AccountSubEntriesCountIsValid"};

     for (uint32_t i = 0; i < 50; ++i)
diff --git a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
index 69edb0711b..0cd75275ad 100644
--- a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
+++ b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
@@ -2,6 +2,7 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0

+#include "bucket/Bucket.h"
 #include "bucket/BucketInputIterator.h"
 #include "bucket/BucketManager.h"
 #include "bucket/BucketOutputIterator.h"
@@ -19,6 +20,7 @@
 #include "test/test.h"
 #include "transactions/TransactionUtils.h"
 #include "util/Decoder.h"
+#include "util/GlobalChecks.h"
 #include "util/Math.h"
 #include "util/UnorderedSet.h"
 #include "util/XDROperators.h"
@@ -42,44 +44,10 @@ struct BucketListGenerator
   public:
     BucketListGenerator() : mLedgerSeq(1)
     {
-        auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+        auto cfg = getTestConfig();
         cfg.OVERRIDE_EVICTION_PARAMS_FOR_TESTING = true;
         cfg.TESTING_STARTING_EVICTION_SCAN_LEVEL = 1;
         mAppGenerate = createTestApplication(mClock, cfg);
-
-        auto skey = SecretKey::fromSeed(mAppGenerate->getNetworkID());
-        LedgerKey key(ACCOUNT);
-        key.account().accountID = skey.getPublicKey();
-        mLiveKeys.insert(key);
-
-        if (appProtocolVersionStartsFrom(*mAppGenerate,
-                                         SOROBAN_PROTOCOL_VERSION))
-        {
-            // All config settings entries will be created automatically during
-            // the protocol upgrade and NOT generated by tests, so they should
-            // be reflected in the live key set. This allows tests to still run
-            // on those entries.
-            for (auto t : xdr::xdr_traits<ConfigSettingID>::enum_values())
-            {
-#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
-                // This setting has been introduced in the vnext xdr, but it's
-                // not used in code yet. This check can be replaced with a
-                // runtime protocol check once we create the setting in the
-                // upgrade path.
-                if (static_cast<ConfigSettingID>(t) ==
-                    ConfigSettingID::
-                        CONFIG_SETTING_CONTRACT_PARALLEL_COMPUTE_V0)
-                {
-                    continue;
-                }
-#endif
-                LedgerKey ckey(CONFIG_SETTING);
-                ckey.configSetting().configSettingID =
-                    static_cast<ConfigSettingID>(t);
-                mLiveKeys.insert(ckey);
-            }
-        }
-
         LedgerTxn ltx(mAppGenerate->getLedgerTxnRoot(), false);
         REQUIRE(mLedgerSeq == ltx.loadHeader().current().ledgerSeq);
     }
@@ -88,7 +56,7 @@ struct BucketListGenerator
     void
     applyBuckets(Application::pointer app, Args&&... args)
     {
-        std::map<std::string, std::shared_ptr<Bucket>> buckets;
+        std::map<std::string, std::shared_ptr<LiveBucket>> buckets;
         auto has = getHistoryArchiveState(app);
         auto& wm = app->getWorkScheduler();
         wm.executeWork<T>(buckets, has,
@@ -101,8 +69,8 @@ applyBuckets(Args&&... args)
     {
         VirtualClock clock;
-        Application::pointer app = createTestApplication(
-            clock, getTestConfig(1, Config::TESTDB_IN_MEMORY_OFFERS));
+        Application::pointer app =
+            createTestApplication(clock, getTestConfig(1));
         applyBuckets<T>(app, std::forward<Args>(args)...);
     }
@@ -144,9 +112,8 @@ struct BucketListGenerator
         std::vector<LedgerKey> deadEntries;
         auto header = ltx.loadHeader().current();
         ltx.getAllEntries(initEntries, liveEntries, deadEntries);
-        BucketTestUtils::addBatchAndUpdateSnapshot(
-            app->getBucketManager().getBucketList(), *app, header, initEntries,
-            liveEntries, deadEntries);
+        BucketTestUtils::addLiveBatchAndUpdateSnapshot(
+            *app, header, initEntries, liveEntries, deadEntries);
         ltx.commit();
     }
@@ -164,8 +131,8 @@ struct BucketListGenerator
     generateLiveEntries(AbstractLedgerTxn& ltx)
     {
         auto entries =
-            LedgerTestUtils::generateValidLedgerEntriesWithExclusions(
-                {CONFIG_SETTING}, 5);
+            LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes({OFFER},
+                                                                       5);
         for (auto& le : entries)
         {
             le.lastModifiedLedgerSeq = mLedgerSeq;
@@ -176,12 +143,7 @@ struct BucketListGenerator
     virtual std::vector<LedgerKey>
     generateDeadEntries(AbstractLedgerTxn& ltx)
     {
-        UnorderedSet<LedgerKey> liveDeletable(mLiveKeys.size());
-        std::copy_if(
-            mLiveKeys.begin(), mLiveKeys.end(),
-            std::inserter(liveDeletable, liveDeletable.end()),
-            [](LedgerKey const& key) { return key.type() != CONFIG_SETTING; });
-
+        UnorderedSet<LedgerKey> liveDeletable = mLiveKeys;
         std::vector<LedgerKey> dead;
         while (dead.size() < 2 && !liveDeletable.empty())
         {
@@ -205,28 +167,28 @@ struct BucketListGenerator
     HistoryArchiveState
     getHistoryArchiveState(Application::pointer app)
     {
-        auto& blGenerate = mAppGenerate->getBucketManager().getBucketList();
+        auto& blGenerate = mAppGenerate->getBucketManager().getLiveBucketList();
         auto& bmApply = app->getBucketManager();
         MergeCounters mergeCounters;
         LedgerTxn ltx(mAppGenerate->getLedgerTxnRoot(), false);
         auto vers = ltx.loadHeader().current().ledgerVersion;
-        for (uint32_t i = 0; i <= BucketList::kNumLevels - 1; i++)
+        for (uint32_t i = 0; i <= LiveBucketList::kNumLevels - 1; i++)
         {
             auto& level = blGenerate.getLevel(i);
             auto meta = testutil::testBucketMetadata(vers);
-            auto keepDead = BucketList::keepDeadEntries(i);
+            auto keepDead = LiveBucketList::keepTombstoneEntries(i);

             auto writeBucketFile = [&](auto b) {
-                BucketOutputIterator out(bmApply.getTmpDir(), keepDead, meta,
-                                         mergeCounters, mClock.getIOContext(),
-                                         /*doFsync=*/true);
-                for (BucketInputIterator in(b); in; ++in)
+                LiveBucketOutputIterator out(bmApply.getTmpDir(), keepDead,
+                                             meta, mergeCounters,
+                                             mClock.getIOContext(),
+                                             /*doFsync=*/true);
+                for (LiveBucketInputIterator in(b); in; ++in)
                 {
                     out.put(*in);
                 }
-                auto bucket =
-                    out.getBucket(bmApply, /*shouldSynchronouslyIndex=*/false);
+                auto bucket = out.getBucket(bmApply);
             };
             writeBucketFile(level.getCurr());
             writeBucketFile(level.getSnap());
@@ -246,9 +208,10 @@ struct BucketListGenerator
 };

 bool
-doesBucketContain(std::shared_ptr<Bucket> bucket, const BucketEntry& be)
+doesBucketContain(std::shared_ptr<LiveBucket> bucket,
+                  const BucketEntry& be)
 {
-    for (BucketInputIterator iter(bucket); iter; ++iter)
+    for (LiveBucketInputIterator iter(bucket); iter; ++iter)
     {
         if (*iter == be)
         {
@@ -259,9 +222,9 @@ doesBucketContain(std::shared_ptr<LiveBucket> bucket, const BucketEntry& be)
 }

 bool
-doesBucketListContain(BucketList& bl, const BucketEntry& be)
+doesBucketListContain(LiveBucketList& bl, const BucketEntry& be)
 {
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
     {
         auto const& level = bl.getLevel(i);
         for (auto const& bucket : {level.getCurr(), level.getSnap()})
@@ -278,11 +241,10 @@ doesBucketListContain(LiveBucketList& bl, const BucketEntry& be)

 struct SelectBucketListGenerator : public BucketListGenerator
 {
     uint32_t const mSelectLedger;
-    LedgerEntryType const mType;
     std::shared_ptr<LedgerEntry> mSelected;

-    SelectBucketListGenerator(uint32_t selectLedger, LedgerEntryType type)
-        : mSelectLedger(selectLedger), mType(type)
+    SelectBucketListGenerator(uint32_t selectLedger)
+        : mSelectLedger(selectLedger)
     {
     }

@@ -291,24 +253,35 @@ struct SelectBucketListGenerator : public BucketListGenerator
     {
         if (mLedgerSeq == mSelectLedger)
         {
-            UnorderedSet<LedgerKey> filteredKeys(mLiveKeys.size());
-            std::copy_if(
-                mLiveKeys.begin(), mLiveKeys.end(),
-                std::inserter(filteredKeys, filteredKeys.end()),
-                [this](LedgerKey const& key) { return key.type() == mType; });
-
-            if (!filteredKeys.empty())
+            if (!mLiveKeys.empty())
             {
-                stellar::uniform_int_distribution<size_t> dist(
-                    0, filteredKeys.size() - 1);
-                auto iter = filteredKeys.begin();
+                stellar::uniform_int_distribution<size_t> dist(
+                    0, mLiveKeys.size() - 1);
+                auto iter = mLiveKeys.begin();
                 std::advance(iter, dist(gRandomEngine));
                 mSelected = std::make_shared<LedgerEntry>(
                     ltx.loadWithoutRecord(*iter).current());
             }
         }
-        return BucketListGenerator::generateLiveEntries(ltx);
+
+        auto live = BucketListGenerator::generateLiveEntries(ltx);
+
+        // Selected entry must not be shadowed
+        if (mSelected)
+        {
+            auto key = LedgerEntryKey(*mSelected);
+            for (size_t i = 0; i < live.size(); ++i)
+            {
+                if (LedgerEntryKey(live.at(i)) == key)
+                {
+                    live.erase(live.begin() + i);
+                    break;
+                }
+            }
+        }
+
+        return live;
     }

     virtual std::vector<LedgerKey>
@@ -337,10 +310,10 @@ class ApplyBucketsWorkAddEntry : public ApplyBucketsWork
   public:
     ApplyBucketsWorkAddEntry(
         Application& app,
-        std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
+        std::map<std::string, std::shared_ptr<LiveBucket>> const& buckets,
         HistoryArchiveState const& applyState, uint32_t maxProtocolVersion,
-        std::function<bool(LedgerEntryType)> filter, LedgerEntry const& entry)
-        : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion, filter)
+        LedgerEntry const& entry)
+        : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion)
         , mEntry(entry)
         , mAdded{false}
     {
@@ -356,13 +329,8 @@ class ApplyBucketsWorkAddEntry : public ApplyBucketsWork
             uint32_t maxLedger = std::numeric_limits<uint32_t>::max() - 1;
             auto& ltxRoot = mApp.getLedgerTxnRoot();

-            size_t count = 0;
-            for (auto let : xdr::xdr_traits<LedgerEntryType>::enum_values())
-            {
-                count += ltxRoot.countObjects(
-                    static_cast<LedgerEntryType>(let),
-                    LedgerRange::inclusive(minLedger, maxLedger));
-            }
+            auto count = ltxRoot.countOffers(
+                LedgerRange::inclusive(minLedger, maxLedger));

             if (count > 0)
             {
@@ -391,7 +359,7 @@ class ApplyBucketsWorkDeleteEntry : public ApplyBucketsWork
   public:
     ApplyBucketsWorkDeleteEntry(
         Application& app,
-        std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
+        std::map<std::string, std::shared_ptr<LiveBucket>> const& buckets,
         HistoryArchiveState const& applyState, uint32_t maxProtocolVersion,
         LedgerEntry const& target)
         : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion)
@@ -431,26 +399,6 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork
     LedgerEntry const mEntry;
     bool mModified;

-    void
-    modifyAccountEntry(LedgerEntry& entry)
-    {
-        AccountEntry const& account = mEntry.data.account();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.account() = LedgerTestUtils::generateValidAccountEntry(5);
-        entry.data.account().accountID = account.accountID;
-    }
-
-    void
-    modifyTrustLineEntry(LedgerEntry& entry)
-    {
-        TrustLineEntry const& trustLine = mEntry.data.trustLine();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.trustLine() =
-            LedgerTestUtils::generateValidTrustLineEntry(5);
-        entry.data.trustLine().accountID = trustLine.accountID;
-        entry.data.trustLine().asset = trustLine.asset;
-    }
-
     void
     modifyOfferEntry(LedgerEntry& entry)
     {
@@ -461,94 +409,10 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork
         entry.data.offer().offerID = offer.offerID;
     }

-    void
-    modifyDataEntry(LedgerEntry& entry)
-    {
-        DataEntry const& data = mEntry.data.data();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        do
-        {
-            entry.data.data() = LedgerTestUtils::generateValidDataEntry(5);
-        } while (entry.data.data().dataValue == data.dataValue);
-        entry.data.data().accountID = data.accountID;
-        entry.data.data().dataName = data.dataName;
-    }
-
-    void
-    modifyClaimableBalanceEntry(LedgerEntry& entry)
-    {
-        ClaimableBalanceEntry const& cb = mEntry.data.claimableBalance();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.claimableBalance() =
-            LedgerTestUtils::generateValidClaimableBalanceEntry(5);
-
-        entry.data.claimableBalance().balanceID = cb.balanceID;
-    }
-
-    void
-    modifyLiquidityPoolEntry(LedgerEntry& entry)
-    {
-        LiquidityPoolEntry const& lp = mEntry.data.liquidityPool();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.liquidityPool() =
-            LedgerTestUtils::generateValidLiquidityPoolEntry(5);
-
-        entry.data.liquidityPool().liquidityPoolID = lp.liquidityPoolID;
-    }
-
-    void
-    modifyConfigSettingEntry(LedgerEntry& entry)
-    {
-        ConfigSettingEntry const& cfg = mEntry.data.configSetting();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.configSetting() =
-            LedgerTestUtils::generateValidConfigSettingEntry(5);
-
-        entry.data.configSetting().configSettingID(cfg.configSettingID());
-    }
-
-    void
-    modifyContractDataEntry(LedgerEntry& entry)
-    {
-        ContractDataEntry const& cd = mEntry.data.contractData();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.contractData() =
-            LedgerTestUtils::generateValidContractDataEntry(5);
-
-        entry.data.contractData().contract = cd.contract;
-        entry.data.contractData().key = cd.key;
-    }
-
-    void
-    modifyContractCodeEntry(LedgerEntry& entry)
-    {
-        ContractCodeEntry const& cc = mEntry.data.contractCode();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-
-        while (entry.data.contractCode().code ==
-               mEntry.data.contractCode().code)
-        {
-            entry.data.contractCode() =
-                LedgerTestUtils::generateValidContractCodeEntry(5);
-        }
-
-        entry.data.contractCode().hash = cc.hash;
-    }
-
-    void
-    modifyTTLEntry(LedgerEntry& entry)
-    {
-        TTLEntry const& ee = mEntry.data.ttl();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.ttl() = LedgerTestUtils::generateValidTTLEntry(5);
-
-        entry.data.ttl().keyHash = ee.keyHash;
-    }
-
   public:
     ApplyBucketsWorkModifyEntry(
         Application& app,
-        std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
+        std::map<std::string, std::shared_ptr<LiveBucket>> const& buckets,
         HistoryArchiveState const& applyState, uint32_t maxProtocolVersion,
         LedgerEntry const& target)
         : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion)
@@ -567,41 +431,10 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork
             auto entry = ltx.load(mKey);
             while (entry && entry.current() == mEntry)
             {
-                switch (mEntry.data.type())
-                {
-                case ACCOUNT:
-                    modifyAccountEntry(entry.current());
-                    break;
-                case TRUSTLINE:
-                    modifyTrustLineEntry(entry.current());
-                    break;
-                case OFFER:
-                    modifyOfferEntry(entry.current());
-                    break;
-                case DATA:
-                    modifyDataEntry(entry.current());
-                    break;
-                case CLAIMABLE_BALANCE:
modifyClaimableBalanceEntry(entry.current()); - break; - case LIQUIDITY_POOL: - modifyLiquidityPoolEntry(entry.current()); - break; - case CONFIG_SETTING: - modifyConfigSettingEntry(entry.current()); - break; - case CONTRACT_DATA: - modifyContractDataEntry(entry.current()); - break; - case CONTRACT_CODE: - modifyContractCodeEntry(entry.current()); - break; - case TTL: - modifyTTLEntry(entry.current()); - break; - default: - REQUIRE(false); - } + releaseAssert( + BucketIndex::typeNotSupported(mEntry.data.type())); + + modifyOfferEntry(entry.current()); mModified = true; } @@ -653,168 +486,61 @@ TEST_CASE("BucketListIsConsistentWithDatabase empty ledgers", REQUIRE_NOTHROW(blg.applyBuckets()); } -TEST_CASE("BucketListIsConsistentWithDatabase test root account", - "[invariant][bucketlistconsistent]") -{ - struct TestRootBucketListGenerator : public BucketListGenerator - { - uint32_t const mTargetLedger; - bool mModifiedRoot; - - TestRootBucketListGenerator() - : mTargetLedger(stellar::uniform_int_distribution(2, 100)( - gRandomEngine)) - , mModifiedRoot(false) - { - } - - virtual std::vector - generateLiveEntries(AbstractLedgerTxn& ltx) - { - if (mLedgerSeq == mTargetLedger) - { - mModifiedRoot = true; - auto& app = mAppGenerate; - auto skey = SecretKey::fromSeed(app->getNetworkID()); - auto root = skey.getPublicKey(); - auto le = - stellar::loadAccountWithoutRecord(ltx, root).current(); - le.lastModifiedLedgerSeq = mLedgerSeq; - return {le}; - } - else - { - return BucketListGenerator::generateLiveEntries(ltx); - } - } - - virtual std::vector - generateDeadEntries(AbstractLedgerTxn& ltx) - { - return {}; - } - }; - - for (size_t j = 0; j < 5; ++j) - { - TestRootBucketListGenerator blg; - blg.generateLedgers(100); - REQUIRE(blg.mModifiedRoot); - REQUIRE_NOTHROW(blg.applyBuckets()); - } -} - TEST_CASE("BucketListIsConsistentWithDatabase added entries", "[invariant][bucketlistconsistent][acceptance]") { - auto runTest = [](bool withFilter) { - for (size_t nTests = 0; nTests < 40; ++nTests) - { - BucketListGenerator blg; - blg.generateLedgers(100); - - stellar::uniform_int_distribution addAtLedgerDist( - 2, blg.mLedgerSeq); - auto le = LedgerTestUtils::generateValidLedgerEntryWithExclusions( - {CONFIG_SETTING}, 5); - le.lastModifiedLedgerSeq = addAtLedgerDist(gRandomEngine); - - if (!withFilter) - { - auto filter = [](auto) { return true; }; - if (le.data.type() == CONFIG_SETTING) - { - // Config settings would have a duplicate key due to low key - // space. - REQUIRE_THROWS_AS( - blg.applyBuckets(filter, le), - std::runtime_error); - } - else - { - REQUIRE_THROWS_AS( - blg.applyBuckets(filter, le), - InvariantDoesNotHold); - } - } - else - { - auto filter = [&](auto let) { return let != le.data.type(); }; - REQUIRE_NOTHROW( - blg.applyBuckets(filter, le)); - } - } - }; - - runTest(true); + for (size_t nTests = 0; nTests < 40; ++nTests) + { + BucketListGenerator blg; + blg.generateLedgers(100); - // This tests the filtering behavior of BucketListIsConsistentWithDatabase - // because the bucket apply will not add anything of the specified - // LedgerEntryType, but we will inject an additional LedgerEntry of that - // type anyway. But it shouldn't throw because the invariant isn't looking - // for those changes. 
- runTest(false); + stellar::uniform_int_distribution addAtLedgerDist( + 2, blg.mLedgerSeq); + auto le = + LedgerTestUtils::generateValidLedgerEntryWithTypes({OFFER}, 10); + le.lastModifiedLedgerSeq = addAtLedgerDist(gRandomEngine); + REQUIRE_THROWS_AS(blg.applyBuckets(le), + InvariantDoesNotHold); + } } TEST_CASE("BucketListIsConsistentWithDatabase deleted entries", "[invariant][bucketlistconsistent][acceptance]") { - for (auto t : xdr::xdr_traits::enum_values()) + size_t nTests = 0; + while (nTests < 10) { - size_t nTests = 0; - while (nTests < 10) + SelectBucketListGenerator blg(100); + blg.generateLedgers(100); + if (!blg.mSelected) { - SelectBucketListGenerator blg(100, static_cast(t)); - blg.generateLedgers(100); - if (!blg.mSelected) - { - continue; - } - if (t == CONFIG_SETTING) - { - // Configuration can not be deleted. - REQUIRE_THROWS_AS(blg.applyBuckets( - *blg.mSelected), - std::runtime_error); - } - else - { - REQUIRE_THROWS_AS(blg.applyBuckets( - *blg.mSelected), - InvariantDoesNotHold); - } - ++nTests; + continue; } + + REQUIRE_THROWS_AS( + blg.applyBuckets(*blg.mSelected), + InvariantDoesNotHold); + ++nTests; } } TEST_CASE("BucketListIsConsistentWithDatabase modified entries", "[invariant][bucketlistconsistent][acceptance]") { - for (auto t : xdr::xdr_traits::enum_values()) + size_t nTests = 0; + while (nTests < 10) { - // Skip CONFIG_SETTING for now because the test modification test does - // not work unless blg itself updates the entry. - if (t == CONFIG_SETTING) + SelectBucketListGenerator blg(100); + blg.generateLedgers(100); + if (!blg.mSelected) { continue; } - size_t nTests = 0; - while (nTests < 10) - { - SelectBucketListGenerator blg(100, static_cast(t)); - blg.generateLedgers(100); - if (!blg.mSelected) - { - continue; - } - - REQUIRE_THROWS_AS( - blg.applyBuckets(*blg.mSelected), - InvariantDoesNotHold); - ++nTests; - } + REQUIRE_THROWS_AS( + blg.applyBuckets(*blg.mSelected), + InvariantDoesNotHold); + ++nTests; } } @@ -857,15 +583,15 @@ TEST_CASE("BucketListIsConsistentWithDatabase bucket bounds", } }; - for (uint32_t level = 0; level < BucketList::kNumLevels; ++level) + for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level) { - uint32_t oldestLedger = BucketList::oldestLedgerInSnap(101, level); + uint32_t oldestLedger = LiveBucketList::oldestLedgerInSnap(101, level); if (oldestLedger == std::numeric_limits::max()) { break; } - uint32_t newestLedger = BucketList::oldestLedgerInCurr(101, level) + - BucketList::sizeOfCurr(101, level) - 1; + uint32_t newestLedger = LiveBucketList::oldestLedgerInCurr(101, level) + + LiveBucketList::sizeOfCurr(101, level) - 1; stellar::uniform_int_distribution ledgerToModifyDist( std::max(2u, oldestLedger), newestLedger); @@ -874,20 +600,21 @@ TEST_CASE("BucketListIsConsistentWithDatabase bucket bounds", uint32_t ledgerToModify = ledgerToModifyDist(gRandomEngine); uint32_t maxLowTargetLedger = 0; uint32_t minHighTargetLedger = 0; - if (ledgerToModify >= BucketList::oldestLedgerInCurr(101, level)) + if (ledgerToModify >= + LiveBucketList::oldestLedgerInCurr(101, level)) { maxLowTargetLedger = - BucketList::oldestLedgerInCurr(101, level) - 1; + LiveBucketList::oldestLedgerInCurr(101, level) - 1; minHighTargetLedger = - BucketList::oldestLedgerInCurr(101, level) + - BucketList::sizeOfCurr(101, level); + LiveBucketList::oldestLedgerInCurr(101, level) + + LiveBucketList::sizeOfCurr(101, level); } else { maxLowTargetLedger = - BucketList::oldestLedgerInSnap(101, level) - 1; + LiveBucketList::oldestLedgerInSnap(101, level) 
- 1; minHighTargetLedger = - BucketList::oldestLedgerInCurr(101, level); + LiveBucketList::oldestLedgerInCurr(101, level); } stellar::uniform_int_distribution lowTargetLedgerDist( 1, maxLowTargetLedger); @@ -913,8 +640,8 @@ TEST_CASE("BucketListIsConsistentWithDatabase merged LIVEENTRY and DEADENTRY", { uint32_t const mTargetLedger; - MergeBucketListGenerator(LedgerEntryType let) - : SelectBucketListGenerator(25, let), mTargetLedger(110) + MergeBucketListGenerator() + : SelectBucketListGenerator(25), mTargetLedger(110) { } @@ -937,72 +664,61 @@ TEST_CASE("BucketListIsConsistentWithDatabase merged LIVEENTRY and DEADENTRY", return (bool)ltx.load(LedgerEntryKey(le)); }; - auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_OFFERS); + auto cfg = getTestConfig(1); cfg.OVERRIDE_EVICTION_PARAMS_FOR_TESTING = true; cfg.TESTING_STARTING_EVICTION_SCAN_LEVEL = 1; - testutil::BucketListDepthModifier bldm(3); - for (auto t : xdr::xdr_traits::enum_values()) + testutil::BucketListDepthModifier bldm(3); + uint32_t nTests = 0; + while (nTests < 5) { - if (t == CONFIG_SETTING) + MergeBucketListGenerator blg; + auto& blGenerate = + blg.mAppGenerate->getBucketManager().getLiveBucketList(); + + blg.generateLedgers(100); + if (!blg.mSelected) { - // Merge logic is not applicable to configuration. continue; } - uint32_t nTests = 0; - while (nTests < 5) - { - MergeBucketListGenerator blg(static_cast(t)); - auto& blGenerate = - blg.mAppGenerate->getBucketManager().getBucketList(); - - blg.generateLedgers(100); - if (!blg.mSelected) - { - continue; - } - - BucketEntry dead(DEADENTRY); - dead.deadEntry() = LedgerEntryKey(*blg.mSelected); - BucketEntry live(LIVEENTRY); - live.liveEntry() = *blg.mSelected; - BucketEntry init(INITENTRY); - init.liveEntry() = *blg.mSelected; - - { - VirtualClock clock; - Application::pointer appApply = - createTestApplication(clock, cfg); - REQUIRE_NOTHROW(blg.applyBuckets(appApply)); - REQUIRE(exists(*blg.mAppGenerate, *blg.mSelected)); - REQUIRE(exists(*appApply, *blg.mSelected)); - } + BucketEntry dead(DEADENTRY); + dead.deadEntry() = LedgerEntryKey(*blg.mSelected); + BucketEntry live(LIVEENTRY); + live.liveEntry() = *blg.mSelected; + BucketEntry init(INITENTRY); + init.liveEntry() = *blg.mSelected; - blg.generateLedgers(10); - REQUIRE(doesBucketListContain(blGenerate, dead)); - REQUIRE((doesBucketListContain(blGenerate, live) || - doesBucketListContain(blGenerate, init))); + { + VirtualClock clock; + Application::pointer appApply = createTestApplication(clock, cfg); + REQUIRE_NOTHROW(blg.applyBuckets(appApply)); + REQUIRE(exists(*blg.mAppGenerate, *blg.mSelected)); + REQUIRE(exists(*appApply, *blg.mSelected)); + } - blg.generateLedgers(100); - REQUIRE(!doesBucketListContain(blGenerate, dead)); - REQUIRE(!(doesBucketListContain(blGenerate, live) || - doesBucketListContain(blGenerate, init))); - REQUIRE(!exists(*blg.mAppGenerate, *blg.mSelected)); + blg.generateLedgers(10); + REQUIRE(doesBucketListContain(blGenerate, dead)); + REQUIRE((doesBucketListContain(blGenerate, live) || + doesBucketListContain(blGenerate, init))); - { - VirtualClock clock; - Application::pointer appApply = - createTestApplication(clock, cfg); - REQUIRE_NOTHROW(blg.applyBuckets(appApply)); - auto& blApply = appApply->getBucketManager().getBucketList(); - REQUIRE(!doesBucketListContain(blApply, dead)); - REQUIRE(!(doesBucketListContain(blApply, live) || - doesBucketListContain(blApply, init))); - REQUIRE(!exists(*appApply, *blg.mSelected)); - } + blg.generateLedgers(100); + 
REQUIRE(!doesBucketListContain(blGenerate, dead)); + REQUIRE(!(doesBucketListContain(blGenerate, live) || + doesBucketListContain(blGenerate, init))); + REQUIRE(!exists(*blg.mAppGenerate, *blg.mSelected)); - ++nTests; + { + VirtualClock clock; + Application::pointer appApply = createTestApplication(clock, cfg); + REQUIRE_NOTHROW(blg.applyBuckets(appApply)); + auto& blApply = appApply->getBucketManager().getLiveBucketList(); + REQUIRE(!doesBucketListContain(blApply, dead)); + REQUIRE(!(doesBucketListContain(blApply, live) || + doesBucketListContain(blApply, init))); + REQUIRE(!exists(*appApply, *blg.mSelected)); } + + ++nTests; } } diff --git a/src/invariant/test/ConservationOfLumensTests.cpp b/src/invariant/test/ConservationOfLumensTests.cpp index 6b91b127b0..e5686c70ef 100644 --- a/src/invariant/test/ConservationOfLumensTests.cpp +++ b/src/invariant/test/ConservationOfLumensTests.cpp @@ -153,7 +153,7 @@ TEST_CASE("Fee pool change without inflation", TEST_CASE("Account balances changed without inflation", "[invariant][conservationoflumens]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"ConservationOfLumens"}; uint32_t const N = 10; @@ -187,7 +187,7 @@ TEST_CASE("Account balances changed without inflation", TEST_CASE("Account balances unchanged without inflation", "[invariant][conservationoflumens]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"ConservationOfLumens"}; uint32_t const N = 10; @@ -228,7 +228,7 @@ TEST_CASE("Account balances unchanged without inflation", TEST_CASE("Inflation changes are consistent", "[invariant][conservationoflumens]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"ConservationOfLumens"}; stellar::uniform_int_distribution payoutsDist(1, 100); stellar::uniform_int_distribution amountDist(1, 100000); diff --git a/src/invariant/test/InvariantTests.cpp b/src/invariant/test/InvariantTests.cpp index 020e94037d..857056e51f 100644 --- a/src/invariant/test/InvariantTests.cpp +++ b/src/invariant/test/InvariantTests.cpp @@ -54,9 +54,10 @@ class TestInvariant : public Invariant } virtual std::string - checkOnBucketApply(std::shared_ptr bucket, - uint32_t oldestLedger, uint32_t newestLedger, - std::function filter) override + checkOnBucketApply( + std::shared_ptr bucket, uint32_t oldestLedger, + uint32_t newestLedger, + std::unordered_set const& shadowedKeys) override { return mShouldFail ? 
"fail" : ""; } @@ -164,14 +165,13 @@ TEST_CASE("onBucketApply fail succeed", "[invariant]") app->getInvariantManager().enableInvariant( TestInvariant::toString(0, true)); - auto bucket = std::make_shared(); + auto bucket = std::make_shared(); uint32_t ledger = 1; uint32_t level = 0; bool isCurr = true; - REQUIRE_THROWS_AS( - app->getInvariantManager().checkOnBucketApply( - bucket, ledger, level, isCurr, [](auto) { return true; }), - InvariantDoesNotHold); + REQUIRE_THROWS_AS(app->getInvariantManager().checkOnBucketApply( + bucket, ledger, level, isCurr, {}), + InvariantDoesNotHold); } { @@ -184,12 +184,12 @@ TEST_CASE("onBucketApply fail succeed", "[invariant]") app->getInvariantManager().enableInvariant( TestInvariant::toString(0, false)); - auto bucket = std::make_shared(); + auto bucket = std::make_shared(); uint32_t ledger = 1; uint32_t level = 0; bool isCurr = true; REQUIRE_NOTHROW(app->getInvariantManager().checkOnBucketApply( - bucket, ledger, level, isCurr, [](auto) { return true; })); + bucket, ledger, level, isCurr, {})); } } diff --git a/src/invariant/test/LedgerEntryIsValidTests.cpp b/src/invariant/test/LedgerEntryIsValidTests.cpp index 4d946183ee..082066e6e7 100644 --- a/src/invariant/test/LedgerEntryIsValidTests.cpp +++ b/src/invariant/test/LedgerEntryIsValidTests.cpp @@ -19,7 +19,7 @@ using namespace stellar::InvariantTestUtils; TEST_CASE("Trigger validity check for each entry type", "[invariant][ledgerentryisvalid]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LedgerEntryIsValid"}; VirtualClock clock; @@ -67,7 +67,7 @@ TEST_CASE("Trigger validity check for each entry type", TEST_CASE("Modify ClaimableBalanceEntry", "[invariant][ledgerentryisvalid][claimablebalance]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LedgerEntryIsValid"}; VirtualClock clock; diff --git a/src/invariant/test/LiabilitiesMatchOffersTests.cpp b/src/invariant/test/LiabilitiesMatchOffersTests.cpp index 1c95224341..c4de34c9c0 100644 --- a/src/invariant/test/LiabilitiesMatchOffersTests.cpp +++ b/src/invariant/test/LiabilitiesMatchOffersTests.cpp @@ -58,7 +58,7 @@ updateAccountWithRandomBalance(LedgerEntry le, Application& app, TEST_CASE("Create account above minimum balance", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; for (uint32_t i = 0; i < 10; ++i) @@ -75,7 +75,7 @@ TEST_CASE("Create account above minimum balance", TEST_CASE("Create account below minimum balance", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; for (uint32_t i = 0; i < 10; ++i) @@ -92,7 +92,7 @@ TEST_CASE("Create account below minimum balance", TEST_CASE("Create account then decrease balance below minimum", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; for (uint32_t i = 0; i < 10; ++i) @@ -111,7 +111,7 @@ TEST_CASE("Create account then decrease balance below minimum", TEST_CASE("Account below minimum balance 
increases but stays below minimum", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; for (uint32_t i = 0; i < 10; ++i) @@ -130,7 +130,7 @@ TEST_CASE("Account below minimum balance increases but stays below minimum", TEST_CASE("Account below minimum balance decreases", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; for (uint32_t i = 0; i < 10; ++i) @@ -250,7 +250,7 @@ generateBuyingLiabilities(Application& app, LedgerEntry offer, bool excess, TEST_CASE("Create account then increase liabilities without changing balance", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; VirtualClock clock; @@ -289,7 +289,7 @@ TEST_CASE("Create account then increase liabilities without changing balance", TEST_CASE("Invariant for liabilities", "[invariant][liabilitiesmatchoffers]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"}; VirtualClock clock; diff --git a/src/invariant/test/OrderBookIsNotCrossedTests.cpp b/src/invariant/test/OrderBookIsNotCrossedTests.cpp index c10a6a5daf..7e3b1ab2c4 100644 --- a/src/invariant/test/OrderBookIsNotCrossedTests.cpp +++ b/src/invariant/test/OrderBookIsNotCrossedTests.cpp @@ -109,7 +109,7 @@ TEST_CASE("OrderBookIsNotCrossed in-memory order book is consistent with " "[invariant][OrderBookIsNotCrossed]") { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); // When testing the order book not crossed invariant, enable it and no other // invariants (these tests do things which violate other invariants). cfg.INVARIANT_CHECKS = {}; @@ -185,7 +185,7 @@ TEST_CASE("OrderBookIsNotCrossed properly throws if order book is crossed", { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); // When testing the order book not crossed invariant, enable it and no other // invariants (these tests do things which violate other invariants). 
cfg.INVARIANT_CHECKS = {}; diff --git a/src/invariant/test/SponsorshipCountIsValidTests.cpp b/src/invariant/test/SponsorshipCountIsValidTests.cpp index 9f35cd5292..91d75c805b 100644 --- a/src/invariant/test/SponsorshipCountIsValidTests.cpp +++ b/src/invariant/test/SponsorshipCountIsValidTests.cpp @@ -18,7 +18,7 @@ using namespace stellar::InvariantTestUtils; TEST_CASE("sponsorship invariant", "[invariant][sponsorshipcountisvalid]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.INVARIANT_CHECKS = {"SponsorshipCountIsValid"}; auto app = createTestApplication(clock, cfg); diff --git a/src/ledger/InMemoryLedgerTxn.cpp b/src/ledger/InMemoryLedgerTxn.cpp index bcdaca07a2..1ba5e5e7af 100644 --- a/src/ledger/InMemoryLedgerTxn.cpp +++ b/src/ledger/InMemoryLedgerTxn.cpp @@ -4,9 +4,12 @@ #include "ledger/InMemoryLedgerTxn.h" #include "crypto/SecretKey.h" +#include "ledger/InMemoryLedgerTxnRoot.h" +#include "ledger/LedgerTxn.h" #include "ledger/LedgerTxnImpl.h" #include "transactions/TransactionUtils.h" #include "util/GlobalChecks.h" +#include "util/UnorderedMap.h" #include "util/XDROperators.h" namespace stellar @@ -73,8 +76,9 @@ InMemoryLedgerTxn::FilteredEntryIteratorImpl::clone() const } InMemoryLedgerTxn::InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, - Database& db) - : LedgerTxn(parent), mDb(db) + Database& db, + AbstractLedgerTxnParent* realRoot) + : LedgerTxn(parent), mDb(db), mRealRootForOffers(realRoot) { } @@ -141,6 +145,36 @@ InMemoryLedgerTxn::updateLedgerKeyMap(EntryIterator iter) { auto const& genKey = iter.key(); updateLedgerKeyMap(genKey, iter.entryExists()); + + // In addition to maintaining in-memory map, commit offers to "real" ltx + // root to test SQL backed offers + if (mRealRootForOffers && + genKey.type() == InternalLedgerEntryType::LEDGER_ENTRY) + { + auto const& ledgerKey = genKey.ledgerKey(); + if (ledgerKey.type() == OFFER) + { + LedgerTxn ltx(*mRealRootForOffers); + if (!iter.entryExists()) + { + ltx.erase(ledgerKey); + } + else + { + auto ltxe = ltx.load(genKey); + if (!ltxe) + { + ltx.create(iter.entry()); + } + else + { + ltxe.current() = iter.entry().ledgerEntry(); + } + } + + ltx.commit(); + } + } } } @@ -332,4 +366,74 @@ InMemoryLedgerTxn::getPoolShareTrustLinesByAccountAndAsset( return res; } +void +InMemoryLedgerTxn::dropOffers(bool rebuild) +{ + if (mRealRootForOffers) + { + mRealRootForOffers->dropOffers(rebuild); + } + else + { + LedgerTxn::dropOffers(rebuild); + } +} + +uint64_t +InMemoryLedgerTxn::countOffers(LedgerRange const& ledgers) const +{ + if (mRealRootForOffers) + { + return mRealRootForOffers->countOffers(ledgers); + } + + return LedgerTxn::countOffers(ledgers); +} + +void +InMemoryLedgerTxn::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const +{ + if (mRealRootForOffers) + { + mRealRootForOffers->deleteOffersModifiedOnOrAfterLedger(ledger); + } + else + { + LedgerTxn::deleteOffersModifiedOnOrAfterLedger(ledger); + } +} + +UnorderedMap +InMemoryLedgerTxn::getAllOffers() +{ + if (mRealRootForOffers) + { + return mRealRootForOffers->getAllOffers(); + } + + return LedgerTxn::getAllOffers(); +} + +std::shared_ptr +InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling) +{ + if (mRealRootForOffers) + { + return mRealRootForOffers->getBestOffer(buying, selling); + } + + return LedgerTxn::getBestOffer(buying, selling); +} + +std::shared_ptr +InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling, + 
OfferDescriptor const& worseThan) +{ + if (mRealRootForOffers) + { + return mRealRootForOffers->getBestOffer(buying, selling, worseThan); + } + + return LedgerTxn::getBestOffer(buying, selling, worseThan); +} } diff --git a/src/ledger/InMemoryLedgerTxn.h b/src/ledger/InMemoryLedgerTxn.h index 76cf56fcae..2f8d03d3ed 100644 --- a/src/ledger/InMemoryLedgerTxn.h +++ b/src/ledger/InMemoryLedgerTxn.h @@ -44,6 +44,12 @@ class InMemoryLedgerTxn : public LedgerTxn Database& mDb; std::unique_ptr mTransaction; + // For some tests, we need to bypass ledger close and commit directly to the + // in-memory ltx. However, we still want to test SQL backed offers. The + // "never" committing root sets this flag to true such that offer-related + // calls get passed to the real SQL backed root + AbstractLedgerTxnParent* const mRealRootForOffers; + UnorderedMap> mOffersAndPoolShareTrustlineKeys; @@ -75,7 +81,8 @@ class InMemoryLedgerTxn : public LedgerTxn EntryIterator getFilteredEntryIterator(EntryIterator const& iter); public: - InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, Database& db); + InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, Database& db, + AbstractLedgerTxnParent* realRoot = nullptr); virtual ~InMemoryLedgerTxn(); void addChild(AbstractLedgerTxn& child, TransactionMode mode) override; @@ -100,6 +107,19 @@ class InMemoryLedgerTxn : public LedgerTxn UnorderedMap getPoolShareTrustLinesByAccountAndAsset(AccountID const& account, Asset const& asset) override; + + // These functions call into the real LedgerTxn root to test offer SQL + // related functionality + UnorderedMap getAllOffers() override; + std::shared_ptr + getBestOffer(Asset const& buying, Asset const& selling) override; + std::shared_ptr + getBestOffer(Asset const& buying, Asset const& selling, + OfferDescriptor const& worseThan) override; + + void dropOffers(bool rebuild) override; + uint64_t countOffers(LedgerRange const& ledgers) const override; + void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; }; } diff --git a/src/ledger/InMemoryLedgerTxnRoot.cpp b/src/ledger/InMemoryLedgerTxnRoot.cpp index 386ceb2e93..891a493ea8 100644 --- a/src/ledger/InMemoryLedgerTxnRoot.cpp +++ b/src/ledger/InMemoryLedgerTxnRoot.cpp @@ -97,74 +97,22 @@ InMemoryLedgerTxnRoot::getNewestVersion(InternalLedgerKey const& key) const } uint64_t -InMemoryLedgerTxnRoot::countObjects(LedgerEntryType let) const -{ - return 0; -} - -uint64_t -InMemoryLedgerTxnRoot::countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const +InMemoryLedgerTxnRoot::countOffers(LedgerRange const& ledgers) const { return 0; } void -InMemoryLedgerTxnRoot::deleteObjectsModifiedOnOrAfterLedger( +InMemoryLedgerTxnRoot::deleteOffersModifiedOnOrAfterLedger( uint32_t ledger) const { } -void -InMemoryLedgerTxnRoot::dropAccounts(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropData(bool) -{ -} - void InMemoryLedgerTxnRoot::dropOffers(bool) { } -void -InMemoryLedgerTxnRoot::dropTrustLines(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropClaimableBalances(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropLiquidityPools(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropContractData(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropContractCode(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropConfigSettings(bool) -{ -} - -void -InMemoryLedgerTxnRoot::dropTTL(bool) -{ -} - double InMemoryLedgerTxnRoot::getPrefetchHitRate() const { diff --git a/src/ledger/InMemoryLedgerTxnRoot.h b/src/ledger/InMemoryLedgerTxnRoot.h index 5d4bc3fe19..647bc20823 100644 ---
a/src/ledger/InMemoryLedgerTxnRoot.h +++ b/src/ledger/InMemoryLedgerTxnRoot.h @@ -64,22 +64,11 @@ class InMemoryLedgerTxnRoot : public AbstractLedgerTxnParent std::shared_ptr getNewestVersion(InternalLedgerKey const& key) const override; - uint64_t countObjects(LedgerEntryType let) const override; - uint64_t countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const override; + uint64_t countOffers(LedgerRange const& ledgers) const override; - void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override; + void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; - void dropAccounts(bool rebuild) override; - void dropData(bool rebuild) override; void dropOffers(bool rebuild) override; - void dropTrustLines(bool rebuild) override; - void dropClaimableBalances(bool rebuild) override; - void dropLiquidityPools(bool rebuild) override; - void dropContractData(bool rebuild) override; - void dropContractCode(bool rebuild) override; - void dropConfigSettings(bool rebuild) override; - void dropTTL(bool rebuild) override; double getPrefetchHitRate() const override; uint32_t prefetchClassic(UnorderedSet const& keys) override; uint32_t prefetchSoroban(UnorderedSet const& keys, diff --git a/src/ledger/LedgerCloseMetaFrame.cpp b/src/ledger/LedgerCloseMetaFrame.cpp index b4ef7a9429..49d67f3f3d 100644 --- a/src/ledger/LedgerCloseMetaFrame.cpp +++ b/src/ledger/LedgerCloseMetaFrame.cpp @@ -8,6 +8,7 @@ #include "transactions/TransactionMetaFrame.h" #include "util/GlobalChecks.h" #include "util/ProtocolVersion.h" +#include "xdr/Stellar-ledger.h" namespace stellar { @@ -144,7 +145,7 @@ LedgerCloseMetaFrame::populateTxSet(TxSetXDRFrame const& txSet) } void -LedgerCloseMetaFrame::populateEvictedEntries( +LedgerCloseMetaFrame::populateEvictedEntriesLegacy( LedgerEntryChanges const& evictionChanges) { releaseAssert(mVersion == 1); @@ -153,6 +154,9 @@ LedgerCloseMetaFrame::populateEvictedEntries( switch (change.type()) { case LEDGER_ENTRY_CREATED: +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + case LEDGER_ENTRY_RESTORED: +#endif throw std::runtime_error("unexpected create in eviction meta"); case LEDGER_ENTRY_STATE: continue; @@ -170,6 +174,25 @@ LedgerCloseMetaFrame::populateEvictedEntries( } } +void +LedgerCloseMetaFrame::populateEvictedEntries( + std::pair, std::vector> const& + evictedEntries) +{ + releaseAssert(mVersion == 1); + for (auto const& key : evictedEntries.first) + { + releaseAssertOrThrow(isTemporaryEntry(key) || key.type() == TTL); + mLedgerCloseMeta.v1().evictedTemporaryLedgerKeys.emplace_back(key); + } + for (auto const& entry : evictedEntries.second) + { + releaseAssertOrThrow(isPersistentEntry(entry.data)); + mLedgerCloseMeta.v1().evictedPersistentLedgerEntries.emplace_back( + entry); + } +} + void LedgerCloseMetaFrame::setNetworkConfiguration( SorobanNetworkConfig const& networkConfig, bool emitExtV1) diff --git a/src/ledger/LedgerCloseMetaFrame.h b/src/ledger/LedgerCloseMetaFrame.h index 60ad7c786a..e5b91d9143 100644 --- a/src/ledger/LedgerCloseMetaFrame.h +++ b/src/ledger/LedgerCloseMetaFrame.h @@ -30,7 +30,14 @@ class LedgerCloseMetaFrame void populateTxSet(TxSetXDRFrame const& txSet); - void populateEvictedEntries(LedgerEntryChanges const& evictionChanges); + // Used for populating meta from legacy serial eviction scan + void + populateEvictedEntriesLegacy(LedgerEntryChanges const& evictionChanges); + + // Used for populating meta from background eviction scan + void populateEvictedEntries( + std::pair, std::vector> const& + 
evictedEntries); void setNetworkConfiguration(SorobanNetworkConfig const& networkConfig, bool emitExtV1); diff --git a/src/ledger/LedgerManager.h b/src/ledger/LedgerManager.h index 88d0ca8bb8..aacc186215 100644 --- a/src/ledger/LedgerManager.h +++ b/src/ledger/LedgerManager.h @@ -172,7 +172,7 @@ class LedgerManager virtual void startCatchup(CatchupConfiguration configuration, std::shared_ptr archive, - std::set> bucketsToRetain) = 0; + std::set> bucketsToRetain) = 0; // Forcibly close the current ledger, applying `ledgerData` as the consensus // changes. This is normally done automatically as part of diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp index 2b7f328671..ec38842418 100644 --- a/src/ledger/LedgerManagerImpl.cpp +++ b/src/ledger/LedgerManagerImpl.cpp @@ -351,41 +351,35 @@ LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist, releaseAssert(latestLedgerHeader.has_value()); - // Step 3. Restore BucketList if we're doing a full core startup - // (startServices=true), OR when using BucketListDB - if (restoreBucketlist || mApp.getConfig().isUsingBucketListDB()) + HistoryArchiveState has = getLastClosedLedgerHAS(); + auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has); + auto pubmissing = + mApp.getHistoryManager().getMissingBucketsReferencedByPublishQueue(); + missing.insert(missing.end(), pubmissing.begin(), pubmissing.end()); + if (!missing.empty()) { - HistoryArchiveState has = getLastClosedLedgerHAS(); - auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has); - auto pubmissing = mApp.getHistoryManager() - .getMissingBucketsReferencedByPublishQueue(); - missing.insert(missing.end(), pubmissing.begin(), pubmissing.end()); - if (!missing.empty()) + CLOG_ERROR(Ledger, "{} buckets are missing from bucket directory '{}'", + missing.size(), mApp.getBucketManager().getBucketDir()); + throw std::runtime_error("Bucket directory is corrupt"); + } + + if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) + { + // Only restart merges in full startup mode. Many modes in core + // (standalone offline commands, in-memory setup) do not need to + // spin up expensive merge processes. + auto assumeStateWork = + mApp.getWorkScheduler().executeWork( + has, latestLedgerHeader->ledgerVersion, restoreBucketlist); + if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS) { - CLOG_ERROR(Ledger, - "{} buckets are missing from bucket directory '{}'", - missing.size(), mApp.getBucketManager().getBucketDir()); - throw std::runtime_error("Bucket directory is corrupt"); + CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}", + ledgerAbbrev(*latestLedgerHeader)); } - - if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) + else { - // Only restart merges in full startup mode. Many modes in core - // (standalone offline commands, in-memory setup) do not need to - // spin up expensive merge processes. 
- auto assumeStateWork = - mApp.getWorkScheduler().executeWork( - has, latestLedgerHeader->ledgerVersion, restoreBucketlist); - if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS) - { - CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}", - ledgerAbbrev(*latestLedgerHeader)); - } - else - { - // Work should only fail during graceful shutdown - releaseAssertOrThrow(mApp.isStopping()); - } + // Work should only fail during graceful shutdown + releaseAssertOrThrow(mApp.isStopping()); } } @@ -731,7 +725,7 @@ LedgerManagerImpl::closeLedgerIf(LedgerCloseData const& ledgerData) void LedgerManagerImpl::startCatchup( CatchupConfiguration configuration, std::shared_ptr archive, - std::set> bucketsToRetain) + std::set> bucketsToRetain) { ZoneScoped; setState(LM_CATCHING_UP_STATE); @@ -1047,11 +1041,10 @@ LedgerManagerImpl::closeLedger(LedgerCloseData const& ledgerData) ltx.commit(); // step 3 - if (protocolVersionStartsFrom(initialLedgerVers, - SOROBAN_PROTOCOL_VERSION) && - mApp.getConfig().isUsingBackgroundEviction()) + if (protocolVersionStartsFrom(initialLedgerVers, SOROBAN_PROTOCOL_VERSION)) { - mApp.getBucketManager().startBackgroundEvictionScan(ledgerSeq + 1); + mApp.getBucketManager().startBackgroundEvictionScan(ledgerSeq + 1, + initialLedgerVers); } // step 4 @@ -1307,13 +1300,16 @@ LedgerManagerImpl::advanceLedgerPointers(LedgerHeader const& header, mLastClosedLedger.hash = ledgerHash; mLastClosedLedger.header = header; - if (mApp.getConfig().isUsingBucketListDB() && - header.ledgerSeq != prevLedgerSeq) + if (header.ledgerSeq != prevLedgerSeq) { - mApp.getBucketManager() - .getBucketSnapshotManager() - .updateCurrentSnapshot(std::make_unique( - mApp.getBucketManager().getBucketList(), header)); + auto& bm = mApp.getBucketManager(); + auto liveSnapshot = std::make_unique>( + bm.getLiveBucketList(), header); + auto hotArchiveSnapshot = + std::make_unique>( + bm.getHotArchiveBucketList(), header); + bm.getBucketSnapshotManager().updateCurrentSnapshot( + std::move(liveSnapshot), std::move(hotArchiveSnapshot)); } } @@ -1485,10 +1481,7 @@ LedgerManagerImpl::prefetchTransactionData( { if (tx->isSoroban()) { - if (mApp.getConfig().isUsingBucketListDB()) - { - tx->insertKeysForTxApply(sorobanKeys, lkMeter.get()); - } + tx->insertKeysForTxApply(sorobanKeys, lkMeter.get()); } else { @@ -1497,14 +1490,11 @@ LedgerManagerImpl::prefetchTransactionData( } // Prefetch classic and soroban keys separately for greater visibility // into the performance of each mode. - if (mApp.getConfig().isUsingBucketListDB()) + if (!sorobanKeys.empty()) { - if (!sorobanKeys.empty()) - { - mApp.getLedgerTxnRoot().prefetchSoroban(sorobanKeys, - lkMeter.get()); - } + mApp.getLedgerTxnRoot().prefetchSoroban(sorobanKeys, lkMeter.get()); } + mApp.getLedgerTxnRoot().prefetchClassic(classicKeys); } } @@ -1656,10 +1646,10 @@ LedgerManagerImpl::storeCurrentLedger(LedgerHeader const& header, mApp.getPersistentState().setState(PersistentState::kLastClosedLedger, binToHex(hash)); - BucketList bl; + LiveBucketList bl; if (mApp.getConfig().MODE_ENABLES_BUCKETLIST) { - bl = mApp.getBucketManager().getBucketList(); + bl = mApp.getBucketManager().getLiveBucketList(); } // Store the current HAS in the database; this is really just to checkpoint // the bucketlist so we can survive a restart and re-attach to the buckets. 
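// [Editor's sketch; illustrative, not part of the patch.] The hunks above
// make each ledger close publish a pair of snapshots (live bucket list plus
// hot archive) through BucketSnapshotManager::updateCurrentSnapshot. Assuming
// the accessors used elsewhere in this patch keep the signatures shown
// (getBucketSnapshotManager, copySearchableLiveBucketListSnapshot, load), a
// read-only consumer would refresh and query the post-close state roughly as
// follows; loadFromLiveSnapshot is a hypothetical helper:

#include "bucket/BucketManager.h" // assumed include paths
#include "bucket/BucketSnapshotManager.h"
#include "main/Application.h"

std::shared_ptr<LedgerEntry const>
loadFromLiveSnapshot(Application& app, LedgerKey const& key)
{
    // Copy a fresh searchable snapshot of the live bucket list; the same
    // manager refreshes the matching hot-archive snapshot on every close.
    auto snap = app.getBucketManager()
                    .getBucketSnapshotManager()
                    .copySearchableLiveBucketListSnapshot();
    return snap->load(key); // nullptr when the key is not in live state
}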
@@ -1695,29 +1685,47 @@ LedgerManagerImpl::transferLedgerEntriesToBucketList( if (blEnabled && protocolVersionStartsFrom(initialLedgerVers, SOROBAN_PROTOCOL_VERSION)) { + auto ttlKeys = ltx.getAllTTLKeysWithoutSealing(); + auto deletedPersistentDataKeys = + ltx.getAllDeletedPersistentContractDataKeysWithoutSealing(); + auto createdPersistentDataKeys = + ltx.getAllCreatedPersistentContractDataKeysWithoutSealing(); + LedgerTxn ltxEvictions(ltx); + + auto evictedEntries = + mApp.getBucketManager().resolveBackgroundEvictionScan( + ltxEvictions, lh.ledgerSeq, ttlKeys, initialLedgerVers); + + if (protocolVersionStartsFrom( + initialLedgerVers, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) { - auto keys = ltx.getAllTTLKeysWithoutSealing(); - LedgerTxn ltxEvictions(ltx); + auto createdVec = + std::vector(createdPersistentDataKeys.begin(), + createdPersistentDataKeys.end()); + auto deletedVec = + std::vector(deletedPersistentDataKeys.begin(), + deletedPersistentDataKeys.end()); + mApp.getBucketManager().addHotArchiveBatch( + mApp, lh, evictedEntries.second, createdVec, deletedVec); - if (mApp.getConfig().isUsingBackgroundEviction()) - { - mApp.getBucketManager().resolveBackgroundEvictionScan( - ltxEvictions, lh.ledgerSeq, keys); - } - else + if (ledgerCloseMeta) { - mApp.getBucketManager().scanForEvictionLegacy(ltxEvictions, - lh.ledgerSeq); + ledgerCloseMeta->populateEvictedEntries(evictedEntries); } + } + else + { if (ledgerCloseMeta) { - ledgerCloseMeta->populateEvictedEntries( + ledgerCloseMeta->populateEvictedEntriesLegacy( ltxEvictions.getChanges()); } - ltxEvictions.commit(); } + ltxEvictions.commit(); + getSorobanNetworkConfigInternal().maybeSnapshotBucketListSize( lh.ledgerSeq, ltx, mApp); } @@ -1725,8 +1733,8 @@ LedgerManagerImpl::transferLedgerEntriesToBucketList( ltx.getAllEntries(initEntries, liveEntries, deadEntries); if (blEnabled) { - mApp.getBucketManager().addBatch(mApp, lh, initEntries, liveEntries, - deadEntries); + mApp.getBucketManager().addLiveBatch(mApp, lh, initEntries, liveEntries, + deadEntries); } } diff --git a/src/ledger/LedgerManagerImpl.h b/src/ledger/LedgerManagerImpl.h index a5b1ae860a..4217b964de 100644 --- a/src/ledger/LedgerManagerImpl.h +++ b/src/ledger/LedgerManagerImpl.h @@ -181,10 +181,10 @@ class LedgerManagerImpl : public LedgerManager Database& getDatabase() override; - void - startCatchup(CatchupConfiguration configuration, - std::shared_ptr archive, - std::set> bucketsToRetain) override; + void startCatchup( + CatchupConfiguration configuration, + std::shared_ptr archive, + std::set> bucketsToRetain) override; void closeLedger(LedgerCloseData const& ledgerData) override; void deleteOldEntries(Database& db, uint32_t ledgerSeq, diff --git a/src/ledger/LedgerStateSnapshot.cpp b/src/ledger/LedgerStateSnapshot.cpp index 6f0228884e..10aedf0ed4 100644 --- a/src/ledger/LedgerStateSnapshot.cpp +++ b/src/ledger/LedgerStateSnapshot.cpp @@ -164,7 +164,7 @@ LedgerTxnReadOnly::executeWithMaybeInnerSnapshot( } BucketSnapshotState::BucketSnapshotState(BucketManager& bm) - : mSnapshot(bm.getSearchableBucketListSnapshot()) + : mSnapshot(bm.getSearchableLiveBucketListSnapshot()) , mLedgerHeader(LedgerHeaderWrapper( std::make_shared(mSnapshot->getLedgerHeader()))) { @@ -223,7 +223,8 @@ LedgerSnapshot::LedgerSnapshot(AbstractLedgerTxn& ltx) LedgerSnapshot::LedgerSnapshot(Application& app) { - if (app.getConfig().DEPRECATED_SQL_LEDGER_STATE) +#ifdef BUILD_TESTS + if (app.getConfig().MODE_USES_IN_MEMORY_LEDGER) { // Legacy read-only SQL transaction 
mLegacyLedgerTxn = std::make_unique( @@ -232,9 +233,8 @@ LedgerSnapshot::LedgerSnapshot(Application& app) mGetter = std::make_unique(*mLegacyLedgerTxn); } else - { +#endif mGetter = std::make_unique(app.getBucketManager()); - } } LedgerHeaderWrapper diff --git a/src/ledger/LedgerStateSnapshot.h b/src/ledger/LedgerStateSnapshot.h index 7a57b1c771..dc4f6f76f9 100644 --- a/src/ledger/LedgerStateSnapshot.h +++ b/src/ledger/LedgerStateSnapshot.h @@ -105,7 +105,7 @@ class LedgerTxnReadOnly : public AbstractLedgerStateSnapshot // A concrete implementation of read-only BucketList snapshot wrapper class BucketSnapshotState : public AbstractLedgerStateSnapshot { - std::shared_ptr mSnapshot; + std::shared_ptr mSnapshot; // Store a copy of the header from mSnapshot. This is needed for // validation flow where for certain validation scenarios the header needs // to be modified diff --git a/src/ledger/LedgerTxn.cpp b/src/ledger/LedgerTxn.cpp index 2085d7c92c..460a6242c6 100644 --- a/src/ledger/LedgerTxn.cpp +++ b/src/ledger/LedgerTxn.cpp @@ -23,6 +23,7 @@ #include "util/XDRStream.h" #include "util/types.h" #include "xdr/Stellar-ledger-entries.h" +#include "xdr/Stellar-types.h" #include "xdrpp/marshal.h" #include #include @@ -1483,6 +1484,18 @@ LedgerTxn::getAllTTLKeysWithoutSealing() const return getImpl()->getAllTTLKeysWithoutSealing(); } +LedgerKeySet +LedgerTxn::getAllDeletedPersistentContractDataKeysWithoutSealing() const +{ + return getImpl()->getAllDeletedPersistentContractDataKeysWithoutSealing(); +} + +LedgerKeySet +LedgerTxn::getAllCreatedPersistentContractDataKeysWithoutSealing() const +{ + return getImpl()->getAllCreatedPersistentContractDataKeysWithoutSealing(); +} + LedgerKeySet LedgerTxn::Impl::getAllTTLKeysWithoutSealing() const { @@ -1500,6 +1513,43 @@ LedgerTxn::Impl::getAllTTLKeysWithoutSealing() const return result; } +LedgerKeySet +LedgerTxn::Impl::getAllDeletedPersistentContractDataKeysWithoutSealing() const +{ + throwIfNotExactConsistency(); + LedgerKeySet result; + for (auto const& [k, v] : mEntry) + { + if (k.type() == InternalLedgerEntryType::LEDGER_ENTRY && + k.ledgerKey().type() == CONTRACT_DATA && + k.ledgerKey().contractData().durability == PERSISTENT && + v.isDeleted()) + { + result.emplace(k.ledgerKey()); + } + } + + return result; +} + +LedgerKeySet +LedgerTxn::Impl::getAllCreatedPersistentContractDataKeysWithoutSealing() const +{ + throwIfNotExactConsistency(); + LedgerKeySet result; + for (auto const& [k, v] : mEntry) + { + if (k.type() == InternalLedgerEntryType::LEDGER_ENTRY && + k.ledgerKey().type() == CONTRACT_DATA && + k.ledgerKey().contractData().durability == PERSISTENT && v.isInit()) + { + result.emplace(k.ledgerKey()); + } + } + + return result; +} + std::shared_ptr LedgerTxn::getNewestVersion(InternalLedgerKey const& key) const { @@ -2009,34 +2059,16 @@ LedgerTxn::Impl::unsealHeader(LedgerTxn& self, } uint64_t -LedgerTxn::countObjects(LedgerEntryType let) const +LedgerTxn::countOffers(LedgerRange const& ledgers) const { - throw std::runtime_error("called countObjects on non-root LedgerTxn"); -} - -uint64_t -LedgerTxn::countObjects(LedgerEntryType let, LedgerRange const& ledgers) const -{ - throw std::runtime_error("called countObjects on non-root LedgerTxn"); + throw std::runtime_error("called countOffers on non-root LedgerTxn"); } void -LedgerTxn::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const +LedgerTxn::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const { throw std::runtime_error( - "called deleteObjectsModifiedOnOrAfterLedger on 
non-root LedgerTxn"); -} - -void -LedgerTxn::dropAccounts(bool rebuild) -{ - throw std::runtime_error("called dropAccounts on non-root LedgerTxn"); -} - -void -LedgerTxn::dropData(bool rebuild) -{ - throw std::runtime_error("called dropData on non-root LedgerTxn"); + "called deleteOffersModifiedOnOrAfterLedger on non-root LedgerTxn"); } void @@ -2045,49 +2077,6 @@ LedgerTxn::dropOffers(bool rebuild) throw std::runtime_error("called dropOffers on non-root LedgerTxn"); } -void -LedgerTxn::dropTrustLines(bool rebuild) -{ - throw std::runtime_error("called dropTrustLines on non-root LedgerTxn"); -} - -void -LedgerTxn::dropClaimableBalances(bool rebuild) -{ - throw std::runtime_error( - "called dropClaimableBalances on non-root LedgerTxn"); -} - -void -LedgerTxn::dropLiquidityPools(bool rebuild) -{ - throw std::runtime_error("called dropLiquidityPools on non-root LedgerTxn"); -} - -void -LedgerTxn::dropContractData(bool rebuild) -{ - throw std::runtime_error("called dropContractData on non-root LedgerTxn"); -} - -void -LedgerTxn::dropContractCode(bool rebuild) -{ - throw std::runtime_error("called dropContractCode on non-root LedgerTxn"); -} - -void -LedgerTxn::dropConfigSettings(bool rebuild) -{ - throw std::runtime_error("called dropConfigSettings on non-root LedgerTxn"); -} - -void -LedgerTxn::dropTTL(bool rebuild) -{ - throw std::runtime_error("called dropTTL on non-root LedgerTxn"); -} - double LedgerTxn::getPrefetchHitRate() const { @@ -2617,8 +2606,7 @@ accum(EntryIterator const& iter, std::vector& upsertBuffer, // Return true only if something is actually accumulated and not skipped over bool -BulkLedgerEntryChangeAccumulator::accumulate(EntryIterator const& iter, - bool bucketListDBEnabled) +BulkLedgerEntryChangeAccumulator::accumulate(EntryIterator const& iter) { // Right now, only LEDGER_ENTRY are recorded in the SQL database if (iter.key().type() != InternalLedgerEntryType::LEDGER_ENTRY) @@ -2626,55 +2614,15 @@ BulkLedgerEntryChangeAccumulator::accumulate(EntryIterator const& iter, return false; } - // Don't accumulate entry types that are supported by BucketListDB when it - // is enabled + // Don't accumulate entry types that are supported by BucketListDB auto type = iter.key().ledgerKey().type(); - if (bucketListDBEnabled && !BucketIndex::typeNotSupported(type)) + if (!BucketIndex::typeNotSupported(type)) { return false; } - switch (type) - { - case ACCOUNT: - accum(iter, mAccountsToUpsert, mAccountsToDelete); - break; - case TRUSTLINE: - accum(iter, mTrustLinesToUpsert, mTrustLinesToDelete); - break; - case OFFER: - accum(iter, mOffersToUpsert, mOffersToDelete); - break; - case DATA: - accum(iter, mAccountDataToUpsert, mAccountDataToDelete); - break; - case CLAIMABLE_BALANCE: - accum(iter, mClaimableBalanceToUpsert, mClaimableBalanceToDelete); - break; - case LIQUIDITY_POOL: - accum(iter, mLiquidityPoolToUpsert, mLiquidityPoolToDelete); - break; - case CONTRACT_DATA: - accum(iter, mContractDataToUpsert, mContractDataToDelete); - break; - case CONTRACT_CODE: - accum(iter, mContractCodeToUpsert, mContractCodeToDelete); - break; - case CONFIG_SETTING: - { - // Configuration can not be deleted. 
- releaseAssert(iter.entryExists()); - std::vector emptyEntries; - accum(iter, mConfigSettingsToUpsert, emptyEntries); - break; - } - case TTL: - accum(iter, mTTLToUpsert, mTTLToDelete); - break; - default: - abort(); - } - + releaseAssertOrThrow(type == OFFER); + accum(iter, mOffersToUpsert, mOffersToDelete); return true; } @@ -2683,30 +2631,7 @@ LedgerTxnRoot::Impl::bulkApply(BulkLedgerEntryChangeAccumulator& bleca, size_t bufferThreshold, LedgerTxnConsistency cons) { - auto& upsertAccounts = bleca.getAccountsToUpsert(); - if (upsertAccounts.size() > bufferThreshold) - { - bulkUpsertAccounts(upsertAccounts); - upsertAccounts.clear(); - } - auto& deleteAccounts = bleca.getAccountsToDelete(); - if (deleteAccounts.size() > bufferThreshold) - { - bulkDeleteAccounts(deleteAccounts, cons); - deleteAccounts.clear(); - } - auto& upsertTrustLines = bleca.getTrustLinesToUpsert(); - if (upsertTrustLines.size() > bufferThreshold) - { - bulkUpsertTrustLines(upsertTrustLines); - upsertTrustLines.clear(); - } - auto& deleteTrustLines = bleca.getTrustLinesToDelete(); - if (deleteTrustLines.size() > bufferThreshold) - { - bulkDeleteTrustLines(deleteTrustLines, cons); - deleteTrustLines.clear(); - } + auto& upsertOffers = bleca.getOffersToUpsert(); if (upsertOffers.size() > bufferThreshold) { @@ -2719,87 +2644,6 @@ LedgerTxnRoot::Impl::bulkApply(BulkLedgerEntryChangeAccumulator& bleca, bulkDeleteOffers(deleteOffers, cons); deleteOffers.clear(); } - auto& upsertAccountData = bleca.getAccountDataToUpsert(); - if (upsertAccountData.size() > bufferThreshold) - { - bulkUpsertAccountData(upsertAccountData); - upsertAccountData.clear(); - } - auto& deleteAccountData = bleca.getAccountDataToDelete(); - if (deleteAccountData.size() > bufferThreshold) - { - bulkDeleteAccountData(deleteAccountData, cons); - deleteAccountData.clear(); - } - auto& upsertClaimableBalance = bleca.getClaimableBalanceToUpsert(); - if (upsertClaimableBalance.size() > bufferThreshold) - { - bulkUpsertClaimableBalance(upsertClaimableBalance); - upsertClaimableBalance.clear(); - } - auto& deleteClaimableBalance = bleca.getClaimableBalanceToDelete(); - if (deleteClaimableBalance.size() > bufferThreshold) - { - bulkDeleteClaimableBalance(deleteClaimableBalance, cons); - deleteClaimableBalance.clear(); - } - auto& upsertLiquidityPool = bleca.getLiquidityPoolToUpsert(); - if (upsertLiquidityPool.size() > bufferThreshold) - { - bulkUpsertLiquidityPool(upsertLiquidityPool); - upsertLiquidityPool.clear(); - } - auto& deleteLiquidityPool = bleca.getLiquidityPoolToDelete(); - if (deleteLiquidityPool.size() > bufferThreshold) - { - bulkDeleteLiquidityPool(deleteLiquidityPool, cons); - deleteLiquidityPool.clear(); - } - auto& upsertConfigSettings = bleca.getConfigSettingsToUpsert(); - if (upsertConfigSettings.size() > bufferThreshold) - { - bulkUpsertConfigSettings(upsertConfigSettings); - upsertConfigSettings.clear(); - } - auto& upsertContractData = bleca.getContractDataToUpsert(); - if (upsertContractData.size() > bufferThreshold) - { - bulkUpsertContractData(upsertContractData); - upsertContractData.clear(); - } - auto& deleteContractData = bleca.getContractDataToDelete(); - if (deleteContractData.size() > bufferThreshold) - { - bulkDeleteContractData(deleteContractData, cons); - deleteContractData.clear(); - } - - auto& upsertContractCode = bleca.getContractCodeToUpsert(); - if (upsertContractCode.size() > bufferThreshold) - { - bulkUpsertContractCode(upsertContractCode); - upsertContractCode.clear(); - } - auto& deleteContractCode = 
bleca.getContractCodeToDelete(); - if (deleteContractCode.size() > bufferThreshold) - { - bulkDeleteContractCode(deleteContractCode, cons); - deleteContractCode.clear(); - } - - auto& upsertTTL = bleca.getTTLToUpsert(); - if (upsertTTL.size() > bufferThreshold) - { - bulkUpsertTTL(upsertTTL); - upsertTTL.clear(); - } - - auto& deleteTTL = bleca.getTTLToDelete(); - if (deleteTTL.size() > bufferThreshold) - { - bulkDeleteTTL(deleteTTL, cons); - deleteTTL.clear(); - } } void @@ -2821,14 +2665,13 @@ LedgerTxnRoot::Impl::commitChild(EntryIterator iter, // guarantee, so use std::unique_ptr<...>::swap to achieve it auto childHeader = std::make_unique(mChild->getHeader()); - auto bucketListDBEnabled = mApp.getConfig().isUsingBucketListDB(); auto bleca = BulkLedgerEntryChangeAccumulator(); [[maybe_unused]] int64_t counter{0}; try { while ((bool)iter) { - if (bleca.accumulate(iter, bucketListDBEnabled)) + if (bleca.accumulate(iter)) { ++counter; } @@ -2907,40 +2750,18 @@ LedgerTxnRoot::Impl::tableFromLedgerEntryType(LedgerEntryType let) } uint64_t -LedgerTxnRoot::countObjects(LedgerEntryType let) const -{ - return mImpl->countObjects(let); -} - -uint64_t -LedgerTxnRoot::Impl::countObjects(LedgerEntryType let) const -{ - using namespace soci; - throwIfChild(); - - std::string query = - "SELECT COUNT(*) FROM " + tableFromLedgerEntryType(let) + ";"; - uint64_t count = 0; - mApp.getDatabase().getSession() << query, into(count); - return count; -} - -uint64_t -LedgerTxnRoot::countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const +LedgerTxnRoot::countOffers(LedgerRange const& ledgers) const { - return mImpl->countObjects(let, ledgers); + return mImpl->countOffers(ledgers); } uint64_t -LedgerTxnRoot::Impl::countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const +LedgerTxnRoot::Impl::countOffers(LedgerRange const& ledgers) const { using namespace soci; throwIfChild(); - std::string query = "SELECT COUNT(*) FROM " + - tableFromLedgerEntryType(let) + + std::string query = "SELECT COUNT(*) FROM offers" " WHERE lastmodified >= :v1 AND lastmodified < :v2;"; uint64_t count = 0; int first = static_cast(ledgers.mFirst); @@ -2951,38 +2772,22 @@ LedgerTxnRoot::Impl::countObjects(LedgerEntryType let, } void -LedgerTxnRoot::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const +LedgerTxnRoot::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const { - return mImpl->deleteObjectsModifiedOnOrAfterLedger(ledger); + return mImpl->deleteOffersModifiedOnOrAfterLedger(ledger); } void -LedgerTxnRoot::Impl::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const +LedgerTxnRoot::Impl::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const { using namespace soci; throwIfChild(); mEntryCache.clear(); mBestOffers.clear(); - for (auto let : xdr::xdr_traits::enum_values()) - { - LedgerEntryType t = static_cast(let); - std::string query = "DELETE FROM " + tableFromLedgerEntryType(t) + - " WHERE lastmodified >= :v1"; - mApp.getDatabase().getSession() << query, use(ledger); - } -} - -void -LedgerTxnRoot::dropAccounts(bool rebuild) -{ - mImpl->dropAccounts(rebuild); -} - -void -LedgerTxnRoot::dropData(bool rebuild) -{ - mImpl->dropData(rebuild); + std::string query = "DELETE FROM " + tableFromLedgerEntryType(OFFER) + + " WHERE lastmodified >= :v1"; + mApp.getDatabase().getSession() << query, use(ledger); } void @@ -2991,48 +2796,6 @@ LedgerTxnRoot::dropOffers(bool rebuild) mImpl->dropOffers(rebuild); } -void -LedgerTxnRoot::dropTrustLines(bool rebuild) -{ - mImpl->dropTrustLines(rebuild); -} 
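// [Editor's note; illustrative sketch, not part of the patch.] This run of
// deletions removes the per-type drop entry points from LedgerTxnRoot:
// offers are now the only ledger entry type with a SQL table, and every
// other type is served by BucketListDB. A rebuild path that previously had
// to call ten per-type drop hooks collapses to the one that remains
// (rebuildClassicSqlState is a hypothetical wrapper):

#include "ledger/LedgerTxn.h" // assumed include path

void
rebuildClassicSqlState(AbstractLedgerTxnParent& root)
{
    // Recreate only the offers table; all other entry types are rebuilt
    // from the bucket list rather than from SQL.
    root.dropOffers(/*rebuild=*/true);
}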
- -void -LedgerTxnRoot::dropClaimableBalances(bool rebuild) -{ - mImpl->dropClaimableBalances(rebuild); -} - -void -LedgerTxnRoot::dropLiquidityPools(bool rebuild) -{ - mImpl->dropLiquidityPools(rebuild); -} - -void -LedgerTxnRoot::dropContractData(bool rebuild) -{ - mImpl->dropContractData(rebuild); -} - -void -LedgerTxnRoot::dropContractCode(bool rebuild) -{ - mImpl->dropContractCode(rebuild); -} - -void -LedgerTxnRoot::dropConfigSettings(bool rebuild) -{ - mImpl->dropConfigSettings(rebuild); -} - -void -LedgerTxnRoot::dropTTL(bool rebuild) -{ - mImpl->dropTTL(rebuild); -} - uint32_t LedgerTxnRoot::prefetchClassic(UnorderedSet const& keys) { @@ -3096,128 +2859,14 @@ LedgerTxnRoot::Impl::prefetchInternal(UnorderedSet const& keys, } }; - if (mApp.getConfig().isUsingBucketListDB()) + LedgerKeySet keysToSearch; + for (auto const& key : keys) { - LedgerKeySet keysToSearch; - for (auto const& key : keys) - { - insertIfNotLoaded(keysToSearch, key); - } - auto blLoad = getSearchableBucketListSnapshot().loadKeysWithLimits( - keysToSearch, lkMeter); - cacheResult(populateLoadedEntries(keysToSearch, blLoad, lkMeter)); - } - else - { - UnorderedSet accounts; - UnorderedSet offers; - UnorderedSet trustlines; - UnorderedSet data; - UnorderedSet claimablebalance; - UnorderedSet liquiditypool; - UnorderedSet contractdata; - UnorderedSet configSettings; - UnorderedSet contractCode; - UnorderedSet ttl; - - for (auto const& key : keys) - { - switch (key.type()) - { - case ACCOUNT: - insertIfNotLoaded(accounts, key); - if (accounts.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadAccounts(accounts)); - accounts.clear(); - } - break; - case OFFER: - insertIfNotLoaded(offers, key); - if (offers.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadOffers(offers)); - offers.clear(); - } - break; - case TRUSTLINE: - insertIfNotLoaded(trustlines, key); - if (trustlines.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadTrustLines(trustlines)); - trustlines.clear(); - } - break; - case DATA: - insertIfNotLoaded(data, key); - if (data.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadData(data)); - data.clear(); - } - break; - case CLAIMABLE_BALANCE: - insertIfNotLoaded(claimablebalance, key); - if (claimablebalance.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadClaimableBalance(claimablebalance)); - claimablebalance.clear(); - } - break; - case LIQUIDITY_POOL: - insertIfNotLoaded(liquiditypool, key); - if (liquiditypool.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadLiquidityPool(liquiditypool)); - liquiditypool.clear(); - } - break; - case CONTRACT_DATA: - insertIfNotLoaded(contractdata, key); - if (contractdata.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadContractData(contractdata)); - contractdata.clear(); - } - break; - case CONTRACT_CODE: - insertIfNotLoaded(contractCode, key); - if (contractCode.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadContractCode(contractCode)); - contractCode.clear(); - } - break; - case CONFIG_SETTING: - insertIfNotLoaded(configSettings, key); - if (configSettings.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadConfigSettings(configSettings)); - configSettings.clear(); - } - break; - case TTL: - insertIfNotLoaded(ttl, key); - if (ttl.size() == mBulkLoadBatchSize) - { - cacheResult(bulkLoadTTL(ttl)); - ttl.clear(); - } - } - } - - // Prefetch whatever is remaining - cacheResult(bulkLoadAccounts(accounts)); - cacheResult(bulkLoadOffers(offers)); - cacheResult(bulkLoadTrustLines(trustlines)); - 
cacheResult(bulkLoadData(data)); - cacheResult(bulkLoadClaimableBalance(claimablebalance)); - cacheResult(bulkLoadLiquidityPool(liquiditypool)); - cacheResult(bulkLoadConfigSettings(configSettings)); - cacheResult(bulkLoadContractData(contractdata)); - cacheResult(bulkLoadContractCode(contractCode)); - cacheResult(bulkLoadTTL(ttl)); + insertIfNotLoaded(keysToSearch, key); } + auto blLoad = getSearchableLiveBucketListSnapshot().loadKeysWithLimits( + keysToSearch, lkMeter); + cacheResult(populateLoadedEntries(keysToSearch, blLoad, lkMeter)); return total; } @@ -3486,15 +3135,15 @@ LedgerTxnRoot::Impl::areEntriesMissingInCacheForOffer(OfferEntry const& oe) return false; } -SearchableBucketListSnapshot& -LedgerTxnRoot::Impl::getSearchableBucketListSnapshot() const +SearchableLiveBucketListSnapshot& +LedgerTxnRoot::Impl::getSearchableLiveBucketListSnapshot() const { - releaseAssert(mApp.getConfig().isUsingBucketListDB()); if (!mSearchableBucketListSnapshot) { - mSearchableBucketListSnapshot = mApp.getBucketManager() - .getBucketSnapshotManager() - .copySearchableBucketListSnapshot(); + mSearchableBucketListSnapshot = + mApp.getBucketManager() + .getBucketSnapshotManager() + .copySearchableLiveBucketListSnapshot(); } return *mSearchableBucketListSnapshot; @@ -3632,17 +3281,9 @@ LedgerTxnRoot::Impl::getPoolShareTrustLinesByAccountAndAsset( std::vector trustLines; try { - if (mApp.getConfig().isUsingBucketListDB()) - { - trustLines = - getSearchableBucketListSnapshot() - .loadPoolShareTrustLinesByAccountAndAsset(account, asset); - } - else - { - trustLines = - loadPoolShareTrustLinesByAccountAndAsset(account, asset); - } + trustLines = + getSearchableLiveBucketListSnapshot() + .loadPoolShareTrustLinesByAccountAndAsset(account, asset); } catch (NonSociRelatedException&) { @@ -3696,15 +3337,8 @@ LedgerTxnRoot::Impl::getInflationWinners(size_t maxWinners, int64_t minVotes) { try { - if (mApp.getConfig().isUsingBucketListDB()) - { - return getSearchableBucketListSnapshot().loadInflationWinners( - maxWinners, minVotes); - } - else - { - return loadInflationWinners(maxWinners, minVotes); - } + return getSearchableLiveBucketListSnapshot().loadInflationWinners( + maxWinners, minVotes); } catch (std::exception& e) { @@ -3752,47 +3386,13 @@ LedgerTxnRoot::Impl::getNewestVersion(InternalLedgerKey const& gkey) const std::shared_ptr entry; try { - if (mApp.getConfig().isUsingBucketListDB() && key.type() != OFFER) + if (key.type() != OFFER) { - entry = getSearchableBucketListSnapshot().load(key); + entry = getSearchableLiveBucketListSnapshot().load(key); } else { - switch (key.type()) - { - case ACCOUNT: - entry = loadAccount(key); - break; - case DATA: - entry = loadData(key); - break; - case OFFER: - entry = loadOffer(key); - break; - case TRUSTLINE: - entry = loadTrustLine(key); - break; - case CLAIMABLE_BALANCE: - entry = loadClaimableBalance(key); - break; - case LIQUIDITY_POOL: - entry = loadLiquidityPool(key); - break; - case CONTRACT_DATA: - entry = loadContractData(key); - break; - case CONTRACT_CODE: - entry = loadContractCode(key); - break; - case CONFIG_SETTING: - entry = loadConfigSetting(key); - break; - case TTL: - entry = loadTTL(key); - break; - default: - throw std::runtime_error("Unknown key type"); - } + entry = loadOffer(key); } } catch (NonSociRelatedException&) diff --git a/src/ledger/LedgerTxn.h b/src/ledger/LedgerTxn.h index 6e755f651e..3534f747e9 100644 --- a/src/ledger/LedgerTxn.h +++ b/src/ledger/LedgerTxn.h @@ -463,61 +463,19 @@ class AbstractLedgerTxnParent virtual std::shared_ptr 
getNewestVersion(InternalLedgerKey const& key) const = 0; - // Return the count of the number of ledger objects of type `let`. Will - // throw when called on anything other than a (real or stub) root LedgerTxn. - virtual uint64_t countObjects(LedgerEntryType let) const = 0; - - // Return the count of the number of offer objects within // range of ledgers `ledgers`. Will throw when called on anything other than // a (real or stub) root LedgerTxn. - virtual uint64_t countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const = 0; + virtual uint64_t countOffers(LedgerRange const& ledgers) const = 0; // Delete all ledger entries modified on-or-after `ledger`. Will throw // when called on anything other than a (real or stub) root LedgerTxn. - virtual void - deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const = 0; - - // Delete all account ledger entries in the database. Will throw when called - // on anything other than a (real or stub) root LedgerTxn. - virtual void dropAccounts(bool rebuild) = 0; - - // Delete all account-data ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropData(bool rebuild) = 0; + virtual void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const = 0; // Delete all offer ledger entries. Will throw when called on anything other // than a (real or stub) root LedgerTxn. virtual void dropOffers(bool rebuild) = 0; - // Delete all trustline ledger entries. Will throw when called on anything - // other than a (real or stub) root LedgerTxn. - virtual void dropTrustLines(bool rebuild) = 0; - - // Delete all claimable balance ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropClaimableBalances(bool rebuild) = 0; - - // Delete all liquidity pool ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropLiquidityPools(bool rebuild) = 0; - - // Delete all contract data ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropContractData(bool rebuild) = 0; - - // Delete all contract code ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropContractCode(bool rebuild) = 0; - - // Delete all config setting ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropConfigSettings(bool rebuild) = 0; - - // Delete all ttl ledger entries. Will throw when called on - // anything other than a (real or stub) root LedgerTxn. - virtual void dropTTL(bool rebuild) = 0; - // Return the current cache hit rate for prefetched ledger entries, as a // fraction from 0.0 to 1.0. Will throw when called on anything other than a // (real or stub) root LedgerTxn. @@ -655,6 +613,12 @@ class AbstractLedgerTxn : public AbstractLedgerTxnParent // modified. virtual LedgerKeySet getAllTTLKeysWithoutSealing() const = 0; + virtual LedgerKeySet + getAllDeletedPersistentContractDataKeysWithoutSealing() const = 0; + + virtual LedgerKeySet + getAllCreatedPersistentContractDataKeysWithoutSealing() const = 0; + // forAllWorstBestOffers allows a parent AbstractLedgerTxn to process the // worst best offers (an offer is a worst best offer if every better offer // in any parent AbstractLedgerTxn has already been loaded). 
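// Call-site sketch for the narrowed interface above (illustrative; the
// LedgerRange::inclusive helper is an assumption, the diff only shows the
// LedgerEntryType parameter disappearing):
//
//   before: root.countObjects(OFFER, LedgerRange::inclusive(lo, hi));
//   after:  root.countOffers(LedgerRange::inclusive(lo, hi));
//
// The non-offer countObjects/drop* hooks lose their purpose once no SQL
// table backs those entry types, which is why only the offer variants
// survive in the interface.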
This function @@ -781,6 +745,10 @@ class LedgerTxn : public AbstractLedgerTxn std::vector& liveEntries, std::vector& deadEntries) override; LedgerKeySet getAllTTLKeysWithoutSealing() const override; + LedgerKeySet + getAllDeletedPersistentContractDataKeysWithoutSealing() const override; + LedgerKeySet + getAllCreatedPersistentContractDataKeysWithoutSealing() const override; std::shared_ptr getNewestVersion(InternalLedgerKey const& key) const override; @@ -815,20 +783,9 @@ class LedgerTxn : public AbstractLedgerTxn void unsealHeader(std::function f) override; - uint64_t countObjects(LedgerEntryType let) const override; - uint64_t countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const override; - void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override; - void dropAccounts(bool rebuild) override; - void dropData(bool rebuild) override; + uint64_t countOffers(LedgerRange const& ledgers) const override; + void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; void dropOffers(bool rebuild) override; - void dropTrustLines(bool rebuild) override; - void dropClaimableBalances(bool rebuild) override; - void dropLiquidityPools(bool rebuild) override; - void dropContractData(bool rebuild) override; - void dropContractCode(bool rebuild) override; - void dropConfigSettings(bool rebuild) override; - void dropTTL(bool rebuild) override; double getPrefetchHitRate() const override; uint32_t prefetchClassic(UnorderedSet const& keys) override; @@ -879,22 +836,11 @@ class LedgerTxnRoot : public AbstractLedgerTxnParent void commitChild(EntryIterator iter, LedgerTxnConsistency cons) noexcept override; - uint64_t countObjects(LedgerEntryType let) const override; - uint64_t countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const override; + uint64_t countOffers(LedgerRange const& ledgers) const override; - void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override; + void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override; - void dropAccounts(bool rebuild) override; - void dropData(bool rebuild) override; void dropOffers(bool rebuild) override; - void dropTrustLines(bool rebuild) override; - void dropClaimableBalances(bool rebuild) override; - void dropLiquidityPools(bool rebuild) override; - void dropContractData(bool rebuild) override; - void dropContractCode(bool rebuild) override; - void dropConfigSettings(bool rebuild) override; - void dropTTL(bool rebuild) override; #ifdef BUILD_TESTS void resetForFuzzer() override; diff --git a/src/ledger/LedgerTxnAccountSQL.cpp b/src/ledger/LedgerTxnAccountSQL.cpp deleted file mode 100644 index db51158f65..0000000000 --- a/src/ledger/LedgerTxnAccountSQL.cpp +++ /dev/null @@ -1,678 +0,0 @@ -// Copyright 2018 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
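// Context for the file deletions that follow (editorial summary, not from
// the patch): each LedgerTxn*SQL.cpp implemented the same five-part
// contract for one entry type -- load, bulk-load, bulk-upsert, bulk-delete,
// drop/rebuild -- against a bespoke table. With reads served from the
// bucket list snapshot, that per-type translation layer goes away; only
// the offer table keeps a SQL implementation.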
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "crypto/KeyUtils.h" -#include "crypto/SecretKey.h" -#include "crypto/SignerKey.h" -#include "database/Database.h" -#include "database/DatabaseTypeSpecificOperation.h" -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "main/Application.h" -#include "util/Decoder.h" -#include "util/GlobalChecks.h" -#include "util/Logging.h" -#include "util/XDROperators.h" -#include "util/types.h" -#include "xdrpp/marshal.h" -#include - -namespace stellar -{ - -std::shared_ptr -LedgerTxnRoot::Impl::loadAccount(LedgerKey const& key) const -{ - ZoneScoped; - std::string actIDStrKey = KeyUtils::toStrKey(key.account().accountID); - - std::string inflationDest, homeDomain, thresholds, signers; - soci::indicator inflationDestInd, signersInd; - std::string extensionStr; - soci::indicator extensionInd; - std::string ledgerExtStr; - soci::indicator ledgerExtInd; - - LedgerEntry le; - le.data.type(ACCOUNT); - auto& account = le.data.account(); - - auto prep = mApp.getDatabase().getPreparedStatement( - "SELECT balance, seqnum, numsubentries, " - "inflationdest, homedomain, thresholds, " - "flags, lastmodified, " - "signers, extension, " - "ledgerext FROM accounts WHERE accountid=:v1"); - auto& st = prep.statement(); - st.exchange(soci::into(account.balance)); - st.exchange(soci::into(account.seqNum)); - st.exchange(soci::into(account.numSubEntries)); - st.exchange(soci::into(inflationDest, inflationDestInd)); - st.exchange(soci::into(homeDomain)); - st.exchange(soci::into(thresholds)); - st.exchange(soci::into(account.flags)); - st.exchange(soci::into(le.lastModifiedLedgerSeq)); - st.exchange(soci::into(signers, signersInd)); - st.exchange(soci::into(extensionStr, extensionInd)); - st.exchange(soci::into(ledgerExtStr, ledgerExtInd)); - st.exchange(soci::use(actIDStrKey)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("account"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - account.accountID = key.account().accountID; - decoder::decode_b64(homeDomain, account.homeDomain); - - bn::decode_b64(thresholds.begin(), thresholds.end(), - account.thresholds.begin()); - - if (inflationDestInd == soci::i_ok) - { - account.inflationDest.activate() = - KeyUtils::fromStrKey(inflationDest); - } - - if (signersInd == soci::i_ok) - { - std::vector signersOpaque; - decoder::decode_b64(signers, signersOpaque); - xdr::xdr_from_opaque(signersOpaque, account.signers); - releaseAssert( - std::adjacent_find(account.signers.begin(), account.signers.end(), - [](Signer const& lhs, Signer const& rhs) { - return !(lhs.key < rhs.key); - }) == account.signers.end()); - } - - decodeOpaqueXDR(extensionStr, extensionInd, account.ext); - - decodeOpaqueXDR(ledgerExtStr, ledgerExtInd, le.ext); - - return std::make_shared(std::move(le)); -} - -std::vector -LedgerTxnRoot::Impl::loadInflationWinners(size_t maxWinners, - int64_t minBalance) const -{ - InflationWinner w; - std::string inflationDest; - - auto prep = mApp.getDatabase().getPreparedStatement( - "SELECT sum(balance) AS votes, inflationdest" - " FROM accounts WHERE inflationdest IS NOT NULL" - " AND balance >= 1000000000 GROUP BY inflationdest" - " ORDER BY votes DESC, inflationdest DESC LIMIT :lim"); - auto& st = prep.statement(); - st.exchange(soci::into(w.votes)); - st.exchange(soci::into(inflationDest)); - st.exchange(soci::use(maxWinners)); - st.define_and_bind(); - st.execute(true); - - 
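// The readers above all follow one soci idiom: bind outputs with into(),
// inputs with use(), then define_and_bind(), execute(true), and a
// got_data()/fetch() loop. Generic shape of that idiom, assuming a
// soci::session `sess` and a toy table (sketch only, not stellar-core code):
//
//   std::vector<std::string> names;
//   std::string name;
//   soci::statement st =
//       (sess.prepare << "SELECT name FROM t WHERE balance >= :b",
//        soci::use(minBalance), soci::into(name));
//   st.execute(true);            // true: also fetch the first row
//   while (st.got_data())
//   {
//       names.push_back(name);
//       st.fetch();              // got_data() turns false after last row
//   }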
std::vector winners; - while (st.got_data()) - { - w.accountID = KeyUtils::fromStrKey(inflationDest); - if (w.votes < minBalance) - { - break; - } - winners.push_back(w); - st.fetch(); - } - return winners; -} - -class BulkUpsertAccountsOperation : public DatabaseTypeSpecificOperation -{ - Database& mDB; - std::vector mAccountIDs; - std::vector mBalances; - std::vector mSeqNums; - std::vector mSubEntryNums; - std::vector mInflationDests; - std::vector mInflationDestInds; - std::vector mFlags; - std::vector mHomeDomains; - std::vector mThresholds; - std::vector mSigners; - std::vector mSignerInds; - std::vector mLastModifieds; - std::vector mExtensions; - std::vector mExtensionInds; - std::vector mLedgerExtensions; - - public: - BulkUpsertAccountsOperation(Database& DB, - std::vector const& entries) - : mDB(DB) - { - mAccountIDs.reserve(entries.size()); - mBalances.reserve(entries.size()); - mSeqNums.reserve(entries.size()); - mSubEntryNums.reserve(entries.size()); - mInflationDests.reserve(entries.size()); - mInflationDestInds.reserve(entries.size()); - mFlags.reserve(entries.size()); - mHomeDomains.reserve(entries.size()); - mThresholds.reserve(entries.size()); - mSigners.reserve(entries.size()); - mSignerInds.reserve(entries.size()); - mLastModifieds.reserve(entries.size()); - mExtensions.reserve(entries.size()); - mExtensionInds.reserve(entries.size()); - mLedgerExtensions.reserve(entries.size()); - - for (auto const& e : entries) - { - releaseAssert(e.entryExists()); - releaseAssert(e.entry().type() == - InternalLedgerEntryType::LEDGER_ENTRY); - auto const& le = e.entry().ledgerEntry(); - releaseAssert(le.data.type() == ACCOUNT); - auto const& account = le.data.account(); - mAccountIDs.emplace_back(KeyUtils::toStrKey(account.accountID)); - mBalances.emplace_back(account.balance); - mSeqNums.emplace_back(account.seqNum); - mSubEntryNums.emplace_back(unsignedToSigned(account.numSubEntries)); - - if (account.inflationDest) - { - mInflationDests.emplace_back( - KeyUtils::toStrKey(*account.inflationDest)); - mInflationDestInds.emplace_back(soci::i_ok); - } - else - { - mInflationDests.emplace_back(""); - mInflationDestInds.emplace_back(soci::i_null); - } - mFlags.emplace_back(unsignedToSigned(account.flags)); - mHomeDomains.emplace_back(decoder::encode_b64(account.homeDomain)); - mThresholds.emplace_back(decoder::encode_b64(account.thresholds)); - if (account.signers.empty()) - { - mSigners.emplace_back(""); - mSignerInds.emplace_back(soci::i_null); - } - else - { - mSigners.emplace_back( - decoder::encode_b64(xdr::xdr_to_opaque(account.signers))); - mSignerInds.emplace_back(soci::i_ok); - } - mLastModifieds.emplace_back( - unsignedToSigned(le.lastModifiedLedgerSeq)); - - if (account.ext.v() >= 1) - { - mExtensions.emplace_back( - decoder::encode_b64(xdr::xdr_to_opaque(account.ext))); - mExtensionInds.emplace_back(soci::i_ok); - } - else - { - mExtensions.emplace_back(""); - mExtensionInds.emplace_back(soci::i_null); - } - - mLedgerExtensions.emplace_back( - decoder::encode_b64(xdr::xdr_to_opaque(le.ext))); - } - } - - void - doSociGenericOperation() - { - std::string sql = - "INSERT INTO accounts ( " - "accountid, balance, seqnum, numsubentries, inflationdest," - "homedomain, thresholds, signers, flags, lastmodified, " - "extension, ledgerext " - ") VALUES ( " - ":id, :v1, :v2, :v3, :v4, :v5, :v6, :v7, :v8, :v9, :v10, :v11 " - ") ON CONFLICT (accountid) DO UPDATE SET " - "balance = excluded.balance, " - "seqnum = excluded.seqnum, " - "numsubentries = excluded.numsubentries, " - 
"inflationdest = excluded.inflationdest, " - "homedomain = excluded.homedomain, " - "thresholds = excluded.thresholds, " - "signers = excluded.signers, " - "flags = excluded.flags, " - "lastmodified = excluded.lastmodified, " - "extension = excluded.extension, " - "ledgerext = excluded.ledgerext"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mAccountIDs)); - st.exchange(soci::use(mBalances)); - st.exchange(soci::use(mSeqNums)); - st.exchange(soci::use(mSubEntryNums)); - st.exchange(soci::use(mInflationDests, mInflationDestInds)); - st.exchange(soci::use(mHomeDomains)); - st.exchange(soci::use(mThresholds)); - st.exchange(soci::use(mSigners, mSignerInds)); - st.exchange(soci::use(mFlags)); - st.exchange(soci::use(mLastModifieds)); - st.exchange(soci::use(mExtensions, mExtensionInds)); - st.exchange(soci::use(mLedgerExtensions)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("account"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs, strBalances, strSeqNums, strSubEntryNums, - strInflationDests, strFlags, strHomeDomains, strThresholds, - strSigners, strLastModifieds, strExtensions, strLedgerExtensions; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - marshalToPGArray(conn, strBalances, mBalances); - marshalToPGArray(conn, strSeqNums, mSeqNums); - marshalToPGArray(conn, strSubEntryNums, mSubEntryNums); - marshalToPGArray(conn, strInflationDests, mInflationDests, - &mInflationDestInds); - marshalToPGArray(conn, strFlags, mFlags); - marshalToPGArray(conn, strHomeDomains, mHomeDomains); - marshalToPGArray(conn, strThresholds, mThresholds); - marshalToPGArray(conn, strSigners, mSigners, &mSignerInds); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - marshalToPGArray(conn, strExtensions, mExtensions, &mExtensionInds); - marshalToPGArray(conn, strLedgerExtensions, mLedgerExtensions); - - std::string sql = "WITH r AS (SELECT " - "unnest(:ids::TEXT[]), " - "unnest(:v1::BIGINT[]), " - "unnest(:v2::BIGINT[]), " - "unnest(:v3::INT[]), " - "unnest(:v4::TEXT[]), " - "unnest(:v5::TEXT[]), " - "unnest(:v6::TEXT[]), " - "unnest(:v7::TEXT[]), " - "unnest(:v8::INT[]), " - "unnest(:v9::INT[]), " - "unnest(:v10::TEXT[]), " - "unnest(:v11::TEXT[]) " - ")" - "INSERT INTO accounts ( " - "accountid, balance, seqnum, " - "numsubentries, inflationdest, homedomain, " - "thresholds, signers, " - "flags, lastmodified, extension, " - "ledgerext " - ") SELECT * FROM r " - "ON CONFLICT (accountid) DO UPDATE SET " - "balance = excluded.balance, " - "seqnum = excluded.seqnum, " - "numsubentries = excluded.numsubentries, " - "inflationdest = excluded.inflationdest, " - "homedomain = excluded.homedomain, " - "thresholds = excluded.thresholds, " - "signers = excluded.signers, " - "flags = excluded.flags, " - "lastmodified = excluded.lastmodified, " - "extension = excluded.extension, " - "ledgerext = excluded.ledgerext"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strBalances)); - st.exchange(soci::use(strSeqNums)); - 
st.exchange(soci::use(strSubEntryNums)); - st.exchange(soci::use(strInflationDests)); - st.exchange(soci::use(strHomeDomains)); - st.exchange(soci::use(strThresholds)); - st.exchange(soci::use(strSigners)); - st.exchange(soci::use(strFlags)); - st.exchange(soci::use(strLastModifieds)); - st.exchange(soci::use(strExtensions)); - st.exchange(soci::use(strLedgerExtensions)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("account"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -class BulkDeleteAccountsOperation : public DatabaseTypeSpecificOperation -{ - Database& mDB; - LedgerTxnConsistency mCons; - std::vector mAccountIDs; - - public: - BulkDeleteAccountsOperation(Database& DB, LedgerTxnConsistency cons, - std::vector const& entries) - : mDB(DB), mCons(cons) - { - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - releaseAssert(e.key().type() == - InternalLedgerEntryType::LEDGER_ENTRY); - releaseAssert(e.key().ledgerKey().type() == ACCOUNT); - auto const& account = e.key().ledgerKey().account(); - mAccountIDs.emplace_back(KeyUtils::toStrKey(account.accountID)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM accounts WHERE accountid = :id"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mAccountIDs)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("account"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - PGconn* conn = pg->conn_; - std::string strAccountIDs; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - std::string sql = - "WITH r AS (SELECT unnest(:ids::TEXT[])) " - "DELETE FROM accounts WHERE accountid IN (SELECT * FROM r)"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("account"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertAccounts( - std::vector const& entries) -{ - ZoneScoped; - ZoneValue(static_cast(entries.size())); - BulkUpsertAccountsOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::bulkDeleteAccounts( - std::vector const& entries, LedgerTxnConsistency cons) -{ - ZoneScoped; - ZoneValue(static_cast(entries.size())); - BulkDeleteAccountsOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropAccounts(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS accounts;"; - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS signers;"; - - if (rebuild) - { - std::string coll = 
mApp.getDatabase().getSimpleCollationClause(); - - mApp.getDatabase().getSession() - << "CREATE TABLE accounts" - << "(" - << "accountid VARCHAR(56) " << coll << " PRIMARY KEY," - << "balance BIGINT NOT NULL CHECK (balance >= 0)," - "buyingliabilities BIGINT CHECK (buyingliabilities >= 0)," - "sellingliabilities BIGINT CHECK (sellingliabilities >= 0)," - "seqnum BIGINT NOT NULL," - "numsubentries INT NOT NULL CHECK (numsubentries " - ">= 0)," - "inflationdest VARCHAR(56)," - "homedomain VARCHAR(44) NOT NULL," - "thresholds TEXT NOT NULL," - "flags INT NOT NULL," - "signers TEXT," - "lastmodified INT NOT NULL," - "extension TEXT," - "ledgerext TEXT NOT NULL" - ");"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() << "ALTER TABLE accounts " - << "ALTER COLUMN accountid " - << "TYPE VARCHAR(56) COLLATE \"C\""; - } - } -} - -class BulkLoadAccountsOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mAccountIDs; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string accountID, inflationDest, homeDomain, thresholds, signers; - int64_t balance; - uint64_t seqNum; - uint32_t numSubEntries, flags, lastModified; - std::string extension; - soci::indicator inflationDestInd, signersInd, extensionInd; - std::string ledgerExtension; - soci::indicator ledgerExtInd; - - st.exchange(soci::into(accountID)); - st.exchange(soci::into(balance)); - st.exchange(soci::into(seqNum)); - st.exchange(soci::into(numSubEntries)); - st.exchange(soci::into(inflationDest, inflationDestInd)); - st.exchange(soci::into(homeDomain)); - st.exchange(soci::into(thresholds)); - st.exchange(soci::into(flags)); - st.exchange(soci::into(lastModified)); - st.exchange(soci::into(extension, extensionInd)); - st.exchange(soci::into(signers, signersInd)); - st.exchange(soci::into(ledgerExtension, ledgerExtInd)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("account"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - le.data.type(ACCOUNT); - auto& ae = le.data.account(); - - ae.accountID = KeyUtils::fromStrKey(accountID); - ae.balance = balance; - ae.seqNum = seqNum; - ae.numSubEntries = numSubEntries; - - if (inflationDestInd == soci::i_ok) - { - ae.inflationDest.activate() = - KeyUtils::fromStrKey(inflationDest); - } - - decoder::decode_b64(homeDomain, ae.homeDomain); - - bn::decode_b64(thresholds.begin(), thresholds.end(), - ae.thresholds.begin()); - - if (inflationDestInd == soci::i_ok) - { - ae.inflationDest.activate() = - KeyUtils::fromStrKey(inflationDest); - } - - ae.flags = flags; - le.lastModifiedLedgerSeq = lastModified; - - decodeOpaqueXDR(extension, extensionInd, ae.ext); - - if (signersInd == soci::i_ok) - { - std::vector signersOpaque; - decoder::decode_b64(signers, signersOpaque); - xdr::xdr_from_opaque(signersOpaque, ae.signers); - releaseAssert(std::adjacent_find( - ae.signers.begin(), ae.signers.end(), - [](Signer const& lhs, Signer const& rhs) { - return !(lhs.key < rhs.key); - }) == ae.signers.end()); - } - - decodeOpaqueXDR(ledgerExtension, ledgerExtInd, le.ext); - - st.fetch(); - } - return res; - } - - public: - BulkLoadAccountsOperation(Database& db, UnorderedSet const& keys) - : mDb(db) - { - mAccountIDs.reserve(keys.size()); - for (auto const& k : keys) - { - releaseAssert(k.type() == ACCOUNT); - mAccountIDs.emplace_back(KeyUtils::toStrKey(k.account().accountID)); - } - } - - virtual std::vector - 
doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector accountIDcstrs; - accountIDcstrs.reserve(mAccountIDs.size()); - for (auto const& acc : mAccountIDs) - { - accountIDcstrs.emplace_back(acc.c_str()); - } - - std::string sql = - "SELECT accountid, balance, seqnum, numsubentries, " - "inflationdest, homedomain, thresholds, flags, lastmodified, " - "extension, signers, ledgerext" - " FROM accounts " - "WHERE accountid IN carray(?, ?, 'char*')"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, accountIDcstrs.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(accountIDcstrs.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - virtual std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs; - marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs); - - std::string sql = - "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT accountid, balance, seqnum, numsubentries, " - "inflationdest, homedomain, thresholds, flags, lastmodified, " - "extension, signers, ledgerext" - " FROM accounts " - "WHERE accountid IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadAccounts(UnorderedSet const& keys) const -{ - ZoneScoped; - ZoneValue(static_cast(keys.size())); - if (!keys.empty()) - { - BulkLoadAccountsOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} -} diff --git a/src/ledger/LedgerTxnClaimableBalanceSQL.cpp b/src/ledger/LedgerTxnClaimableBalanceSQL.cpp deleted file mode 100644 index e952589209..0000000000 --- a/src/ledger/LedgerTxnClaimableBalanceSQL.cpp +++ /dev/null @@ -1,373 +0,0 @@ -// Copyright 2020 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
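// The SQLite bulk reads in these deleted files rely on the carray()
// table-valued function to treat a C array as the right-hand side of IN.
// The binding pattern, as visible in BulkLoadAccountsOperation above:
//
//   sqlite3_reset(st);
//   sqlite3_bind_pointer(st, 1, ptrs.data(), "carray", 0); // no destructor
//   sqlite3_bind_int(st, 2, static_cast<int>(ptrs.size()));
//
// The pointed-to strings must outlive statement execution, which is why a
// local std::vector of c_str() pointers is built before running the query.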
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -std::shared_ptr -LedgerTxnRoot::Impl::loadClaimableBalance(LedgerKey const& key) const -{ - auto balanceID = toOpaqueBase64(key.claimableBalance().balanceID); - - std::string claimableBalanceEntryStr; - LedgerEntry le; - - std::string sql = "SELECT ledgerentry " - "FROM claimablebalance " - "WHERE balanceid= :balanceid"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(claimableBalanceEntryStr)); - st.exchange(soci::use(balanceID)); - st.define_and_bind(); - st.execute(true); - if (!st.got_data()) - { - return nullptr; - } - - fromOpaqueBase64(le, claimableBalanceEntryStr); - releaseAssert(le.data.type() == CLAIMABLE_BALANCE); - - return std::make_shared(std::move(le)); -} - -class BulkLoadClaimableBalanceOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mBalanceIDs; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string balanceIdStr, claimableBalanceEntryStr; - - st.exchange(soci::into(balanceIdStr)); - st.exchange(soci::into(claimableBalanceEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("claimablebalance"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, claimableBalanceEntryStr); - releaseAssert(le.data.type() == CLAIMABLE_BALANCE); - - st.fetch(); - } - return res; - } - - public: - BulkLoadClaimableBalanceOperation(Database& db, - UnorderedSet const& keys) - : mDb(db) - { - mBalanceIDs.reserve(keys.size()); - for (auto const& k : keys) - { - releaseAssert(k.type() == CLAIMABLE_BALANCE); - mBalanceIDs.emplace_back( - toOpaqueBase64(k.claimableBalance().balanceID)); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector cstrBalanceIDs; - cstrBalanceIDs.reserve(mBalanceIDs.size()); - for (size_t i = 0; i < mBalanceIDs.size(); ++i) - { - cstrBalanceIDs.emplace_back(mBalanceIDs[i].c_str()); - } - - std::string sql = "WITH r AS (SELECT value FROM carray(?, ?, 'char*')) " - "SELECT balanceid, ledgerentry " - "FROM claimablebalance " - "WHERE balanceid IN r"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, cstrBalanceIDs.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(cstrBalanceIDs.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strBalanceIDs; - marshalToPGArray(pg->conn_, strBalanceIDs, mBalanceIDs); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT balanceid, ledgerentry " - "FROM claimablebalance " - "WHERE balanceid IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strBalanceIDs)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadClaimableBalance( - 
UnorderedSet const& keys) const -{ - if (!keys.empty()) - { - BulkLoadClaimableBalanceOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class BulkDeleteClaimableBalanceOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - LedgerTxnConsistency mCons; - std::vector mBalanceIDs; - - public: - BulkDeleteClaimableBalanceOperation( - Database& db, LedgerTxnConsistency cons, - std::vector const& entries) - : mDb(db), mCons(cons) - { - mBalanceIDs.reserve(entries.size()); - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - releaseAssert(e.key().ledgerKey().type() == CLAIMABLE_BALANCE); - mBalanceIDs.emplace_back(toOpaqueBase64( - e.key().ledgerKey().claimableBalance().balanceID)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM claimablebalance WHERE balanceid = :id"; - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(mBalanceIDs)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("claimablebalance"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mBalanceIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strBalanceIDs; - marshalToPGArray(pg->conn_, strBalanceIDs, mBalanceIDs); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "DELETE FROM claimablebalance " - "WHERE balanceid IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strBalanceIDs)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("claimablebalance"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mBalanceIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkDeleteClaimableBalance( - std::vector const& entries, LedgerTxnConsistency cons) -{ - BulkDeleteClaimableBalanceOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -class BulkUpsertClaimableBalanceOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mBalanceIDs; - std::vector mClaimableBalanceEntrys; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - releaseAssert(entry.data.type() == CLAIMABLE_BALANCE); - mBalanceIDs.emplace_back( - toOpaqueBase64(entry.data.claimableBalance().balanceID)); - mClaimableBalanceEntrys.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - public: - BulkUpsertClaimableBalanceOperation( - Database& Db, std::vector const& entryIter) - : mDb(Db) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = "INSERT INTO claimablebalance " - "(balanceid, ledgerentry, lastmodified) " - "VALUES " - "( :id, :v1, :v2 ) " - "ON CONFLICT (balanceid) DO UPDATE SET " - "balanceid = excluded.balanceid, ledgerentry = " - 
"excluded.ledgerentry, lastmodified = " - "excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mBalanceIDs)); - st.exchange(soci::use(mClaimableBalanceEntrys)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("claimablebalance"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mBalanceIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strBalanceIDs, strClaimableBalanceEntry, strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strBalanceIDs, mBalanceIDs); - marshalToPGArray(conn, strClaimableBalanceEntry, - mClaimableBalanceEntrys); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS " - "(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::INT[]))" - "INSERT INTO claimablebalance " - "(balanceid, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (balanceid) DO UPDATE SET " - "balanceid = excluded.balanceid, ledgerentry = " - "excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strBalanceIDs)); - st.exchange(soci::use(strClaimableBalanceEntry)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("claimablebalance"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mBalanceIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertClaimableBalance( - std::vector const& entries) -{ - BulkUpsertClaimableBalanceOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropClaimableBalances(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS claimablebalance;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() - << "CREATE TABLE claimablebalance (" - << "balanceid VARCHAR(48) " << coll << " PRIMARY KEY, " - << "ledgerentry TEXT NOT NULL, " - << "lastmodified INT NOT NULL);"; - } -} -} diff --git a/src/ledger/LedgerTxnConfigSettingSQL.cpp b/src/ledger/LedgerTxnConfigSettingSQL.cpp deleted file mode 100644 index d06282e203..0000000000 --- a/src/ledger/LedgerTxnConfigSettingSQL.cpp +++ /dev/null @@ -1,294 +0,0 @@ -// Copyright 2022 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -static void -throwIfNotConfigSetting(LedgerEntryType type) -{ - if (type != CONFIG_SETTING) - { - throw NonSociRelatedException("LedgerEntry is not a CONFIG_SETTING"); - } -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadConfigSetting(LedgerKey const& key) const -{ - int32_t configSettingID = key.configSetting().configSettingID; - std::string configSettingEntryStr; - - std::string sql = "SELECT ledgerentry " - "FROM configsettings " - "WHERE configsettingid = :configsettingid"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(configSettingEntryStr)); - st.exchange(soci::use(configSettingID)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("configsetting"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, configSettingEntryStr); - throwIfNotConfigSetting(le.data.type()); - - return std::make_shared(std::move(le)); -} - -class bulkLoadConfigSettingsOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mConfigSettingIDs; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string configSettingEntryStr; - - st.exchange(soci::into(configSettingEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("configsetting"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, configSettingEntryStr); - throwIfNotConfigSetting(le.data.type()); - - st.fetch(); - } - return res; - } - - public: - bulkLoadConfigSettingsOperation(Database& db, - UnorderedSet const& keys) - : mDb(db) - { - mConfigSettingIDs.reserve(keys.size()); - for (auto const& k : keys) - { - throwIfNotConfigSetting(k.type()); - mConfigSettingIDs.emplace_back(k.configSetting().configSettingID); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::string sql = "WITH r AS (SELECT value FROM carray(?, ?, 'int32')) " - "SELECT ledgerentry " - "FROM configsettings " - "WHERE configsettingid IN r"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, (void*)mConfigSettingIDs.data(), "carray", - 0); - sqlite3_bind_int(st, 2, static_cast(mConfigSettingIDs.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strConfigSettingIDs; - marshalToPGArray(pg->conn_, strConfigSettingIDs, mConfigSettingIDs); - - std::string sql = "WITH r AS (SELECT unnest(:v1::INT[])) " - "SELECT ledgerentry " - "FROM configsettings " - "WHERE configsettingid IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strConfigSettingIDs)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> 
-LedgerTxnRoot::Impl::bulkLoadConfigSettings( - UnorderedSet const& keys) const -{ - if (!keys.empty()) - { - bulkLoadConfigSettingsOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class bulkUpsertConfigSettingsOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mConfigSettingIDs; - std::vector mConfigSettingEntries; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - throwIfNotConfigSetting(entry.data.type()); - - mConfigSettingIDs.emplace_back( - entry.data.configSetting().configSettingID()); - mConfigSettingEntries.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - public: - bulkUpsertConfigSettingsOperation( - Database& Db, std::vector const& entryIter) - : mDb(Db) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = "INSERT INTO configsettings " - "(configsettingid, ledgerentry, lastmodified) " - "VALUES " - "( :id, :v1, :v2 ) " - "ON CONFLICT (configsettingid) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mConfigSettingIDs)); - st.exchange(soci::use(mConfigSettingEntries)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("configsetting"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != - mConfigSettingIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strConfigSettingIDs, strConfigSettingEntries, - strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strConfigSettingIDs, mConfigSettingIDs); - marshalToPGArray(conn, strConfigSettingEntries, mConfigSettingEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS " - "(SELECT unnest(:ids::INT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::INT[])) " - "INSERT INTO configsettings " - "(configsettingid, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (configsettingid) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strConfigSettingIDs)); - st.exchange(soci::use(strConfigSettingEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("configsetting"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != - mConfigSettingIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertConfigSettings( - std::vector const& entries) -{ - bulkUpsertConfigSettingsOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropConfigSettings(bool rebuild) -{ - throwIfChild(); - 
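// Storage note on the four newer tables (claimablebalance, configsettings,
// contractcode, contractdata): the whole LedgerEntry travels as a single
// opaque base64-XDR column, so these schemas never had to track XDR
// changes. Round trip with the helpers named in this diff (signatures are
// assumptions):
//
//   std::string blob = toOpaqueBase64(le);  // XDR-serialize, then base64
//   LedgerEntry back;
//   fromOpaqueBase64(back, blob);           // base64-decode, XDR-parse
//   releaseAssert(back.data.type() == le.data.type());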
mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS configsettings;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() - << "CREATE TABLE configsettings (" - << "configsettingid INT PRIMARY KEY, " - << "ledgerentry TEXT " << coll << " NOT NULL, " - << "lastmodified INT NOT NULL);"; - } -} -} \ No newline at end of file diff --git a/src/ledger/LedgerTxnContractCodeSQL.cpp b/src/ledger/LedgerTxnContractCodeSQL.cpp deleted file mode 100644 index 0421e8996c..0000000000 --- a/src/ledger/LedgerTxnContractCodeSQL.cpp +++ /dev/null @@ -1,386 +0,0 @@ -// Copyright 2022 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -static void -throwIfNotContractCode(LedgerEntryType type) -{ - if (type != CONTRACT_CODE) - { - throw NonSociRelatedException("LedgerEntry is not a CONTRACT_CODE"); - } -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadContractCode(LedgerKey const& k) const -{ - auto hash = toOpaqueBase64(k.contractCode().hash); - std::string contractCodeEntryStr; - - std::string sql = "SELECT ledgerentry " - "FROM contractcode " - "WHERE hash = :hash"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(contractCodeEntryStr)); - st.exchange(soci::use(hash)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("contractcode"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, contractCodeEntryStr); - throwIfNotContractCode(le.data.type()); - - return std::make_shared(std::move(le)); -} - -class BulkLoadContractCodeOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mHashes; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string contractCodeEntryStr; - - st.exchange(soci::into(contractCodeEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("contractcode"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, contractCodeEntryStr); - throwIfNotContractCode(le.data.type()); - - st.fetch(); - } - return res; - } - - public: - BulkLoadContractCodeOperation(Database& db, - UnorderedSet const& keys) - : mDb(db) - { - mHashes.reserve(keys.size()); - for (auto const& k : keys) - { - throwIfNotContractCode(k.type()); - mHashes.emplace_back(toOpaqueBase64(k.contractCode().hash)); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector cStrHashes; - cStrHashes.reserve(mHashes.size()); - for (auto const& h : mHashes) - { - cStrHashes.emplace_back(h.c_str()); - } - std::string sql = "SELECT ledgerentry " - "FROM contractcode " - "WHERE hash IN carray(?, ?, 'char*')"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - 
sqlite3_bind_pointer(st, 1, (void*)cStrHashes.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(cStrHashes.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strHashes; - marshalToPGArray(pg->conn_, strHashes, mHashes); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT ledgerentry " - "FROM contractcode " - "WHERE (hash) IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strHashes)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadContractCode( - UnorderedSet const& keys) const -{ - if (!keys.empty()) - { - BulkLoadContractCodeOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class BulkDeleteContractCodeOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - LedgerTxnConsistency mCons; - std::vector mHashes; - - public: - BulkDeleteContractCodeOperation(Database& db, LedgerTxnConsistency cons, - std::vector const& entries) - : mDb(db), mCons(cons) - { - mHashes.reserve(entries.size()); - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - throwIfNotContractCode(e.key().ledgerKey().type()); - mHashes.emplace_back( - toOpaqueBase64(e.key().ledgerKey().contractCode().hash)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM contractcode WHERE hash = :id"; - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(mHashes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("contractcode"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mHashes.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strHashes; - marshalToPGArray(pg->conn_, strHashes, mHashes); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "DELETE FROM contractcode " - "WHERE hash IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strHashes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("contractcode"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mHashes.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkDeleteContractCode( - std::vector const& entries, LedgerTxnConsistency cons) -{ - BulkDeleteContractCodeOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -class BulkUpsertContractCodeOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mHashes; - std::vector mContractCodeEntries; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - throwIfNotContractCode(entry.data.type()); - - mHashes.emplace_back(toOpaqueBase64(entry.data.contractCode().hash)); - 
mContractCodeEntries.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - public: - BulkUpsertContractCodeOperation(Database& Db, - std::vector const& entryIter) - : mDb(Db) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = "INSERT INTO contractCode " - "(hash, ledgerentry, lastmodified) " - "VALUES " - "( :hash, :v1, :v2 ) " - "ON CONFLICT (hash) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mHashes)); - st.exchange(soci::use(mContractCodeEntries)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("contractcode"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mHashes.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strHashes, strContractCodeEntries, strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strHashes, mHashes); - marshalToPGArray(conn, strContractCodeEntries, mContractCodeEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS " - "(SELECT unnest(:v1::TEXT[]), " - "unnest(:v1::TEXT[]), unnest(:v2::INT[])) " - "INSERT INTO contractcode " - "(hash, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (hash) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strHashes)); - st.exchange(soci::use(strContractCodeEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("contractcode"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mHashes.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertContractCode( - std::vector const& entries) -{ - BulkUpsertContractCodeOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropContractCode(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS contractcode;"; - - if (rebuild) - { - mApp.getDatabase().getSession() - << "CREATE TABLE contractcode (" - << "hash TEXT " << coll << " NOT NULL, " - << "ledgerentry TEXT " << coll << " NOT NULL, " - << "lastmodified INT NOT NULL, " - << "PRIMARY KEY (hash));"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() << "ALTER TABLE contractcode " - << "ALTER COLUMN hash " - << "TYPE TEXT COLLATE \"C\";"; - } - } -} - -} diff --git a/src/ledger/LedgerTxnContractDataSQL.cpp b/src/ledger/LedgerTxnContractDataSQL.cpp deleted file mode 100644 index a7f716a561..0000000000 --- a/src/ledger/LedgerTxnContractDataSQL.cpp +++ /dev/null @@ -1,461 +0,0 @@ 
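// One oddity worth recording from the Postgres upsert deleted just above:
// its CTE reads "unnest(:v1::TEXT[]), unnest(:v1::TEXT[]),
// unnest(:v2::INT[])", reusing the :v1 name for two different arrays. It
// appears to have worked only because soci binds the exchanged use()
// arguments positionally here, so the three arrays still lined up with the
// three placeholders; the name reuse looks like a copy-paste slip rather
// than intent.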
-// Copyright 2022 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -static void -throwIfNotContractData(LedgerEntryType type) -{ - if (type != CONTRACT_DATA) - { - throw NonSociRelatedException("LedgerEntry is not a CONTRACT_DATA"); - } -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadContractData(LedgerKey const& k) const -{ - auto contractID = toOpaqueBase64(k.contractData().contract); - auto key = toOpaqueBase64(k.contractData().key); - int32_t type = k.contractData().durability; - std::string contractDataEntryStr; - - std::string sql = - "SELECT ledgerentry " - "FROM contractdata " - "WHERE contractID = :contractID AND key = :key AND type = :type"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(contractDataEntryStr)); - st.exchange(soci::use(contractID)); - st.exchange(soci::use(key)); - st.exchange(soci::use(type)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("contractdata"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, contractDataEntryStr); - throwIfNotContractData(le.data.type()); - - return std::make_shared(std::move(le)); -} - -class BulkLoadContractDataOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mContractIDs; - std::vector mKeys; - std::vector mTypes; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string contractDataEntryStr; - - st.exchange(soci::into(contractDataEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("contractdata"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, contractDataEntryStr); - throwIfNotContractData(le.data.type()); - - st.fetch(); - } - return res; - } - - public: - BulkLoadContractDataOperation(Database& db, - UnorderedSet const& keys) - : mDb(db) - { - mContractIDs.reserve(keys.size()); - mKeys.reserve(keys.size()); - mTypes.reserve(keys.size()); - for (auto const& k : keys) - { - throwIfNotContractData(k.type()); - mContractIDs.emplace_back( - toOpaqueBase64(k.contractData().contract)); - mKeys.emplace_back(toOpaqueBase64(k.contractData().key)); - mTypes.emplace_back(k.contractData().durability); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector cStrContractIDs, cStrKeys; - cStrContractIDs.reserve(mContractIDs.size()); - cStrKeys.reserve(cStrKeys.size()); - for (auto const& cid : mContractIDs) - { - cStrContractIDs.emplace_back(cid.c_str()); - } - for (auto const& key : mKeys) - { - cStrKeys.emplace_back(key.c_str()); - } - - std::string sqlJoin = "SELECT x.value, y.value, z.value " - "FROM " - "(SELECT rowid, value FROM carray(?, ?, 'char*') " - "ORDER BY rowid) AS x " - "INNER JOIN " - "(SELECT rowid, value FROM carray(?, ?, 'char*') " - "ORDER BY rowid) AS y " - "ON x.rowid = y.rowid " - "INNER JOIN " - "(SELECT rowid, value FROM carray(?, ?, 'int32') " - "ORDER BY rowid) AS z " - "ON x.rowid = z.rowid"; - - std::string sql = "WITH r AS (" + sqlJoin + - 
") " - "SELECT ledgerentry " - "FROM contractdata " - "WHERE (contractid, key, type) IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, (void*)cStrContractIDs.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(mContractIDs.size())); - sqlite3_bind_pointer(st, 3, (void*)cStrKeys.data(), "carray", 0); - sqlite3_bind_int(st, 4, static_cast(mKeys.size())); - sqlite3_bind_pointer(st, 5, (void*)mTypes.data(), "carray", 0); - sqlite3_bind_int(st, 6, static_cast(mTypes.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strContractIDs, strKeys, strTypes; - marshalToPGArray(pg->conn_, strContractIDs, mContractIDs); - marshalToPGArray(pg->conn_, strKeys, mKeys); - marshalToPGArray(pg->conn_, strTypes, mTypes); - - std::string sql = "WITH r AS (SELECT unnest(:ids::TEXT[]), " - "unnest(:v1::TEXT[]), unnest(:v2::INT[])) " - "SELECT ledgerentry " - "FROM contractdata " - "WHERE (contractid, key, type) IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strContractIDs)); - st.exchange(soci::use(strKeys)); - st.exchange(soci::use(strTypes)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadContractData( - UnorderedSet const& keys) const -{ - if (!keys.empty()) - { - BulkLoadContractDataOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class BulkDeleteContractDataOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - LedgerTxnConsistency mCons; - std::vector mContractIDs; - std::vector mKeys; - std::vector mTypes; - - public: - BulkDeleteContractDataOperation(Database& db, LedgerTxnConsistency cons, - std::vector const& entries) - : mDb(db), mCons(cons) - { - mContractIDs.reserve(entries.size()); - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - throwIfNotContractData(e.key().ledgerKey().type()); - mContractIDs.emplace_back( - toOpaqueBase64(e.key().ledgerKey().contractData().contract)); - mKeys.emplace_back( - toOpaqueBase64(e.key().ledgerKey().contractData().key)); - mTypes.emplace_back(e.key().ledgerKey().contractData().durability); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM contractdata WHERE contractid = :id " - "AND key = :key AND type = :type"; - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(mContractIDs)); - st.exchange(soci::use(mKeys)); - st.exchange(soci::use(mTypes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("contractdata"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != - mContractIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strContractIDs, strKeys, 
strTypes; - marshalToPGArray(pg->conn_, strContractIDs, mContractIDs); - marshalToPGArray(pg->conn_, strKeys, mKeys); - marshalToPGArray(pg->conn_, strTypes, mTypes); - - std::string sql = "WITH r AS (SELECT unnest(:ids::TEXT[]), " - "unnest(:v1::TEXT[]), unnest(:v2::INT[])) " - "DELETE FROM contractdata " - "WHERE (contractid, key, type) IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strContractIDs)); - st.exchange(soci::use(strKeys)); - st.exchange(soci::use(strTypes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("contractdata"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != - mContractIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkDeleteContractData( - std::vector const& entries, LedgerTxnConsistency cons) -{ - BulkDeleteContractDataOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -class BulkUpsertContractDataOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mContractIDs; - std::vector mKeys; - std::vector mTypes; - std::vector mContractDataEntries; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - throwIfNotContractData(entry.data.type()); - - mContractIDs.emplace_back( - toOpaqueBase64(entry.data.contractData().contract)); - mKeys.emplace_back(toOpaqueBase64(entry.data.contractData().key)); - mTypes.emplace_back(entry.data.contractData().durability); - mContractDataEntries.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - public: - BulkUpsertContractDataOperation(Database& Db, - std::vector const& entryIter) - : mDb(Db) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = "INSERT INTO contractData " - "(contractid, key, type, ledgerentry, lastmodified) " - "VALUES " - "( :id, :key, :type, :v1, :v2 ) " - "ON CONFLICT (contractid, key, type) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mContractIDs)); - st.exchange(soci::use(mKeys)); - st.exchange(soci::use(mTypes)); - st.exchange(soci::use(mContractDataEntries)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("contractdata"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mContractIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strContractIDs, strKeys, strTypes, strContractDataEntries, - strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strContractIDs, mContractIDs); - marshalToPGArray(conn, strKeys, mKeys); - marshalToPGArray(conn, strTypes, mTypes); - marshalToPGArray(conn, strContractDataEntries, mContractDataEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = - "WITH r AS " - 
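The bulk-delete operations in this patch share one idiom worth calling out: parameters are bound as whole vectors, soci executes the statement once per element, and afterwards `get_affected_rows()` is compared to the batch size, but only when the transaction demands `LedgerTxnConsistency::EXACT` (the other mode tolerates deletes of rows that were never written). A stripped-down sketch of that pattern against a bare soci session; names are illustrative, not the stellar-core API:

```cpp
#include <soci/soci.h>
#include <stdexcept>
#include <string>
#include <vector>

// Bulk delete with an optional exact-row-count consistency check,
// mirroring the EXACT consistency handling in the operations above.
void
bulkDelete(soci::session& sess, std::vector<std::string> const& ids,
           bool exact)
{
    soci::statement st =
        (sess.prepare << "DELETE FROM contractdata WHERE contractid = :id",
         soci::use(ids));
    st.execute(true); // one logical execution over the whole vector
    if (exact && static_cast<size_t>(st.get_affected_rows()) != ids.size())
    {
        throw std::runtime_error("Could not update data in SQL");
    }
}
```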
"(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::INT[]), unnest(:v3::TEXT[]), unnest(:v4::INT[])) " - "INSERT INTO contractdata " - "(contractid, key, type, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (contractid,key,type) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strContractIDs)); - st.exchange(soci::use(strKeys)); - st.exchange(soci::use(strTypes)); - st.exchange(soci::use(strContractDataEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("contractdata"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mContractIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertContractData( - std::vector const& entries) -{ - BulkUpsertContractDataOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropContractData(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS contractdata;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() - << "CREATE TABLE contractdata (" - << "contractid TEXT " << coll << " NOT NULL, " - << "key TEXT " << coll << " NOT NULL, " - << "type INT NOT NULL, " - << "ledgerentry TEXT " << coll << " NOT NULL, " - << "lastmodified INT NOT NULL, " - << "PRIMARY KEY (contractid, key, type));"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() << "ALTER TABLE contractdata " - << "ALTER COLUMN contractid " - << "TYPE TEXT COLLATE \"C\"," - << "ALTER COLUMN key " - << "TYPE TEXT COLLATE \"C\"," - << "ALTER COLUMN type " - << "TYPE INT;"; - } - } -} - -} \ No newline at end of file diff --git a/src/ledger/LedgerTxnDataSQL.cpp b/src/ledger/LedgerTxnDataSQL.cpp deleted file mode 100644 index a17a38b208..0000000000 --- a/src/ledger/LedgerTxnDataSQL.cpp +++ /dev/null @@ -1,507 +0,0 @@ -// Copyright 2018 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "crypto/KeyUtils.h" -#include "crypto/SecretKey.h" -#include "database/Database.h" -#include "database/DatabaseTypeSpecificOperation.h" -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "main/Application.h" -#include "util/Decoder.h" -#include "util/GlobalChecks.h" -#include "util/Logging.h" -#include "util/types.h" -#include - -namespace stellar -{ - -std::shared_ptr -LedgerTxnRoot::Impl::loadData(LedgerKey const& key) const -{ - ZoneScoped; - std::string actIDStrKey = KeyUtils::toStrKey(key.data().accountID); - std::string dataName = decoder::encode_b64(key.data().dataName); - - std::string dataValue; - soci::indicator dataValueIndicator; - std::string extensionStr; - soci::indicator extensionInd; - std::string ledgerExtStr; - soci::indicator ledgerExtInd; - - LedgerEntry le; - le.data.type(DATA); - DataEntry& de = le.data.data(); - - std::string sql = "SELECT datavalue, lastmodified, extension, " - "ledgerext " - "FROM accountdata " - "WHERE accountid= :id AND dataname= :dataname"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(dataValue, dataValueIndicator)); - st.exchange(soci::into(le.lastModifiedLedgerSeq)); - st.exchange(soci::into(extensionStr, extensionInd)); - st.exchange(soci::into(ledgerExtStr, ledgerExtInd)); - st.exchange(soci::use(actIDStrKey)); - st.exchange(soci::use(dataName)); - st.define_and_bind(); - st.execute(true); - if (!st.got_data()) - { - return nullptr; - } - - de.accountID = key.data().accountID; - de.dataName = key.data().dataName; - - if (dataValueIndicator != soci::i_ok) - { - throw std::runtime_error("bad database state"); - } - decoder::decode_b64(dataValue, de.dataValue); - - decodeOpaqueXDR(extensionStr, extensionInd, de.ext); - - decodeOpaqueXDR(ledgerExtStr, ledgerExtInd, le.ext); - - return std::make_shared(std::move(le)); -} - -class BulkUpsertDataOperation : public DatabaseTypeSpecificOperation -{ - Database& mDB; - std::vector mAccountIDs; - std::vector mDataNames; - std::vector mDataValues; - std::vector mLastModifieds; - std::vector mExtensions; - std::vector mLedgerExtensions; - - void - accumulateEntry(LedgerEntry const& entry) - { - releaseAssert(entry.data.type() == DATA); - DataEntry const& data = entry.data.data(); - mAccountIDs.emplace_back(KeyUtils::toStrKey(data.accountID)); - mDataNames.emplace_back(decoder::encode_b64(data.dataName)); - mDataValues.emplace_back(decoder::encode_b64(data.dataValue)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - mExtensions.emplace_back( - decoder::encode_b64(xdr::xdr_to_opaque(data.ext))); - mLedgerExtensions.emplace_back( - decoder::encode_b64(xdr::xdr_to_opaque(entry.ext))); - } - - public: - BulkUpsertDataOperation(Database& DB, - std::vector const& entries) - : mDB(DB) - { - for (auto const& e : entries) - { - accumulateEntry(e); - } - } - - BulkUpsertDataOperation(Database& DB, - std::vector const& entryIter) - : mDB(DB) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - releaseAssert(e.entry().type() == - InternalLedgerEntryType::LEDGER_ENTRY); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = - "INSERT INTO accountdata ( " - "accountid, dataname, datavalue, lastmodified, extension, " - "ledgerext " - ") VALUES ( " - ":id, :v1, :v2, :v3, :v4, :v5 " - ") ON 
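A note on `marshalToPGArray`, which every Postgres branch in this patch relies on: it renders a host vector as a single Postgres array literal, escaped against the live connection, so that one statement parameter can carry an entire column. A toy rendering for intuition only; the real helper escapes through libpq rather than naively quoting:

```cpp
#include <string>
#include <vector>

// Toy equivalent of marshalToPGArray's output format: {"a","b","c"}.
// No escaping is performed here; illustration only.
std::string
toPGTextArray(std::vector<std::string> const& vals)
{
    std::string out = "{";
    for (size_t i = 0; i < vals.size(); ++i)
    {
        if (i > 0)
        {
            out += ",";
        }
        out += "\"" + vals[i] + "\"";
    }
    out += "}";
    return out;
}
```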
CONFLICT (accountid, dataname) DO UPDATE SET " - "datavalue = excluded.datavalue, " - "lastmodified = excluded.lastmodified, " - "extension = excluded.extension, " - "ledgerext = excluded.ledgerext"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mAccountIDs)); - st.exchange(soci::use(mDataNames)); - st.exchange(soci::use(mDataValues)); - st.exchange(soci::use(mLastModifieds)); - st.exchange(soci::use(mExtensions)); - st.exchange(soci::use(mLedgerExtensions)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("data"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs, strDataNames, strDataValues, - strLastModifieds, strExtensions, strLedgerExtensions; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - marshalToPGArray(conn, strDataNames, mDataNames); - marshalToPGArray(conn, strDataValues, mDataValues); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - marshalToPGArray(conn, strExtensions, mExtensions); - marshalToPGArray(conn, strLedgerExtensions, mLedgerExtensions); - std::string sql = - "WITH r AS (SELECT " - "unnest(:ids::TEXT[]), " - "unnest(:v1::TEXT[]), " - "unnest(:v2::TEXT[]), " - "unnest(:v3::INT[]), " - "unnest(:v4::TEXT[]), " - "unnest(:v5::TEXT[]) " - ")" - "INSERT INTO accountdata ( " - "accountid, dataname, datavalue, lastmodified, extension, " - "ledgerext " - ") SELECT * FROM r " - "ON CONFLICT (accountid, dataname) DO UPDATE SET " - "datavalue = excluded.datavalue, " - "lastmodified = excluded.lastmodified, " - "extension = excluded.extension, " - "ledgerext = excluded.ledgerext"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strDataNames)); - st.exchange(soci::use(strDataValues)); - st.exchange(soci::use(strLastModifieds)); - st.exchange(soci::use(strExtensions)); - st.exchange(soci::use(strLedgerExtensions)); - st.define_and_bind(); - { - auto timer = mDB.getUpsertTimer("data"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -class BulkDeleteDataOperation : public DatabaseTypeSpecificOperation -{ - Database& mDB; - LedgerTxnConsistency mCons; - std::vector mAccountIDs; - std::vector mDataNames; - - public: - BulkDeleteDataOperation(Database& DB, LedgerTxnConsistency cons, - std::vector const& entries) - : mDB(DB), mCons(cons) - { - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - releaseAssert(e.key().type() == - InternalLedgerEntryType::LEDGER_ENTRY); - releaseAssert(e.key().ledgerKey().type() == DATA); - auto const& data = e.key().ledgerKey().data(); - mAccountIDs.emplace_back(KeyUtils::toStrKey(data.accountID)); - mDataNames.emplace_back(decoder::encode_b64(data.dataName)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM accountdata WHERE accountid = :id AND " - " dataname = :v1 "; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mAccountIDs)); 
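The accountdata loader above also shows the convention for nullable XDR columns: a `soci::indicator` distinguishes SQL NULL from a present value, and present values are base64-decoded and then XDR-unmarshalled. A sketch of what `decodeOpaqueXDR` amounts to under that convention, assuming stellar-core's `decoder::decode_b64` and xdrpp's `xdr_from_opaque`:

```cpp
// Decode a nullable base64-XDR column: SQL NULL leaves `out` in its
// default-initialized (v0) state. Sketch of decodeOpaqueXDR's contract.
template <typename T>
void
decodeNullableXDR(std::string const& b64, soci::indicator ind, T& out)
{
    if (ind == soci::i_ok)
    {
        std::vector<uint8_t> opaque;
        decoder::decode_b64(b64, opaque);
        xdr::xdr_from_opaque(opaque, out);
    }
}
```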
- st.exchange(soci::use(mDataNames)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("data"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strAccountIDs; - std::string strDataNames; - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strAccountIDs, mAccountIDs); - marshalToPGArray(conn, strDataNames, mDataNames); - std::string sql = - "WITH r AS ( SELECT " - "unnest(:ids::TEXT[])," - "unnest(:v1::TEXT[])" - " ) " - "DELETE FROM accountdata WHERE (accountid, dataname) IN " - "(SELECT * FROM r)"; - auto prep = mDB.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strDataNames)); - st.define_and_bind(); - { - auto timer = mDB.getDeleteTimer("data"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mAccountIDs.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertAccountData( - std::vector const& entries) -{ - ZoneScoped; - ZoneValue(static_cast(entries.size())); - BulkUpsertDataOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::bulkDeleteAccountData( - std::vector const& entries, LedgerTxnConsistency cons) -{ - ZoneScoped; - ZoneValue(static_cast(entries.size())); - BulkDeleteDataOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropData(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS accountdata;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - mApp.getDatabase().getSession() - << "CREATE TABLE accountdata" - << "(" - << "accountid VARCHAR(56) " << coll << " NOT NULL," - << "dataname VARCHAR(88) " << coll << " NOT NULL," - << "datavalue VARCHAR(112) NOT NULL," - "lastmodified INT NOT NULL," - "extension TEXT," - "ledgerext TEXT NOT NULL," - "PRIMARY KEY (accountid, dataname)" - ");"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() - << "ALTER TABLE accountdata " - << "ALTER COLUMN accountid " - << "TYPE VARCHAR(56) COLLATE \"C\", " - << "ALTER COLUMN dataname " - << "TYPE VARCHAR(88) COLLATE \"C\""; - } - } -} - -class BulkLoadDataOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mAccountIDs; - std::vector mDataNames; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string accountID, dataName, dataValue; - uint32_t lastModified; - std::string extension; - soci::indicator extensionInd; - std::string ledgerExtension; - soci::indicator ledgerExtInd; - - st.exchange(soci::into(accountID)); - st.exchange(soci::into(dataName)); - st.exchange(soci::into(dataValue)); - st.exchange(soci::into(lastModified)); - st.exchange(soci::into(extension, extensionInd)); - st.exchange(soci::into(ledgerExtension, ledgerExtInd)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("data"); - 
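Each `bulkLoad*` wrapper in this patch funnels its result through `populateLoadedEntries`, whose job is to turn "vector of rows that exist" into "map over every requested key", recording misses as `nullptr` so the entry cache can remember negative lookups too. A simplified sketch, assuming `LedgerEntryKey`, which computes a LedgerEntry's key:

```cpp
// Fold bulk-load results back into a per-key map; absent keys map to
// nullptr so the caller can cache "not in database". Simplified sketch.
UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
populateLoadedSketch(UnorderedSet<LedgerKey> const& keys,
                     std::vector<LedgerEntry> const& found)
{
    UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>> res;
    for (auto const& le : found)
    {
        res.emplace(LedgerEntryKey(le),
                    std::make_shared<LedgerEntry const>(le));
    }
    for (auto const& k : keys)
    {
        res.try_emplace(k, nullptr); // confirmed miss
    }
    return res;
}
```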
st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - le.data.type(DATA); - auto& de = le.data.data(); - - de.accountID = KeyUtils::fromStrKey(accountID); - decoder::decode_b64(dataName, de.dataName); - decoder::decode_b64(dataValue, de.dataValue); - le.lastModifiedLedgerSeq = lastModified; - - decodeOpaqueXDR(extension, extensionInd, de.ext); - - decodeOpaqueXDR(ledgerExtension, ledgerExtInd, le.ext); - - st.fetch(); - } - return res; - } - - public: - BulkLoadDataOperation(Database& db, UnorderedSet const& keys) - : mDb(db) - { - mAccountIDs.reserve(keys.size()); - mDataNames.reserve(keys.size()); - for (auto const& k : keys) - { - releaseAssert(k.type() == DATA); - mAccountIDs.emplace_back(KeyUtils::toStrKey(k.data().accountID)); - mDataNames.emplace_back(decoder::encode_b64(k.data().dataName)); - } - } - - virtual std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - releaseAssert(mAccountIDs.size() == mDataNames.size()); - - std::vector cstrAccountIDs; - std::vector cstrDataNames; - cstrAccountIDs.reserve(mAccountIDs.size()); - cstrDataNames.reserve(mDataNames.size()); - for (size_t i = 0; i < mAccountIDs.size(); ++i) - { - cstrAccountIDs.emplace_back(mAccountIDs[i].c_str()); - cstrDataNames.emplace_back(mDataNames[i].c_str()); - } - - std::string sqlJoin = - "SELECT x.value, y.value FROM " - "(SELECT rowid, value FROM carray(?, ?, 'char*') ORDER BY rowid) " - "AS x " - "INNER JOIN (SELECT rowid, value FROM carray(?, ?, 'char*') ORDER " - "BY rowid) AS y ON x.rowid = y.rowid"; - std::string sql = "WITH r AS (" + sqlJoin + - ") SELECT accountid, dataname, datavalue, " - "lastmodified, extension, " - "ledgerext " - "FROM accountdata WHERE (accountid, dataname) IN r"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, cstrAccountIDs.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(cstrAccountIDs.size())); - sqlite3_bind_pointer(st, 3, cstrDataNames.data(), "carray", 0); - sqlite3_bind_int(st, 4, static_cast(cstrDataNames.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - releaseAssert(mAccountIDs.size() == mDataNames.size()); - - std::string strAccountIDs; - std::string strDataNames; - marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs); - marshalToPGArray(pg->conn_, strDataNames, mDataNames); - - std::string sql = - "WITH r AS (SELECT unnest(:v1::TEXT[]), unnest(:v2::TEXT[])) " - "SELECT accountid, dataname, datavalue, lastmodified, extension, " - "ledgerext " - "FROM accountdata WHERE (accountid, dataname) IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strAccountIDs)); - st.exchange(soci::use(strDataNames)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadData(UnorderedSet const& keys) const -{ - ZoneScoped; - ZoneValue(static_cast(keys.size())); - if (!keys.empty()) - { - BulkLoadDataOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} -} diff --git 
a/src/ledger/LedgerTxnImpl.h b/src/ledger/LedgerTxnImpl.h index 4d71595f70..d806c17af3 100644 --- a/src/ledger/LedgerTxnImpl.h +++ b/src/ledger/LedgerTxnImpl.h @@ -20,7 +20,7 @@ namespace stellar { -class SearchableBucketListSnapshot; +class SearchableLiveBucketListSnapshot; class EntryIterator::AbstractImpl { @@ -54,52 +54,10 @@ class EntryIterator::AbstractImpl // reorganizing the relevant parts of soci. class BulkLedgerEntryChangeAccumulator { - - std::vector mAccountsToUpsert; - std::vector mAccountsToDelete; - std::vector mAccountDataToUpsert; - std::vector mAccountDataToDelete; - std::vector mClaimableBalanceToUpsert; - std::vector mClaimableBalanceToDelete; std::vector mOffersToUpsert; std::vector mOffersToDelete; - std::vector mTrustLinesToUpsert; - std::vector mTrustLinesToDelete; - std::vector mLiquidityPoolToUpsert; - std::vector mLiquidityPoolToDelete; - std::vector mContractDataToUpsert; - std::vector mContractDataToDelete; - std::vector mContractCodeToUpsert; - std::vector mContractCodeToDelete; - std::vector mConfigSettingsToUpsert; - std::vector mTTLToUpsert; - std::vector mTTLToDelete; public: - std::vector& - getAccountsToUpsert() - { - return mAccountsToUpsert; - } - - std::vector& - getAccountsToDelete() - { - return mAccountsToDelete; - } - - std::vector& - getTrustLinesToUpsert() - { - return mTrustLinesToUpsert; - } - - std::vector& - getTrustLinesToDelete() - { - return mTrustLinesToDelete; - } - std::vector& getOffersToUpsert() { @@ -112,85 +70,7 @@ class BulkLedgerEntryChangeAccumulator return mOffersToDelete; } - std::vector& - getAccountDataToUpsert() - { - return mAccountDataToUpsert; - } - - std::vector& - getAccountDataToDelete() - { - return mAccountDataToDelete; - } - - std::vector& - getClaimableBalanceToUpsert() - { - return mClaimableBalanceToUpsert; - } - - std::vector& - getClaimableBalanceToDelete() - { - return mClaimableBalanceToDelete; - } - - std::vector& - getLiquidityPoolToUpsert() - { - return mLiquidityPoolToUpsert; - } - - std::vector& - getLiquidityPoolToDelete() - { - return mLiquidityPoolToDelete; - } - - std::vector& - getConfigSettingsToUpsert() - { - return mConfigSettingsToUpsert; - } - - std::vector& - getContractDataToUpsert() - { - return mContractDataToUpsert; - } - - std::vector& - getContractDataToDelete() - { - return mContractDataToDelete; - } - - std::vector& - getContractCodeToUpsert() - { - return mContractCodeToUpsert; - } - - std::vector& - getContractCodeToDelete() - { - return mContractCodeToDelete; - } - - std::vector& - getTTLToUpsert() - { - return mTTLToUpsert; - } - - std::vector& - getTTLToDelete() - { - return mTTLToDelete; - } - - bool accumulate(EntryIterator const& iter, bool bucketListDBEnabled); + bool accumulate(EntryIterator const& iter); }; // Many functions in LedgerTxn::Impl provide a basic exception safety @@ -512,7 +392,6 @@ class LedgerTxn::Impl // modified // - the entry cache may be, but is not guaranteed to be, cleared. LedgerTxnDelta getDelta(); - // getOffersByAccountAndAsset has the basic exception safety guarantee. If // it throws an exception, then // - the prepared statement cache may be, but is not guaranteed to be, @@ -552,6 +431,8 @@ class LedgerTxn::Impl std::vector& deadEntries); LedgerKeySet getAllTTLKeysWithoutSealing() const; + LedgerKeySet getAllDeletedPersistentContractDataKeysWithoutSealing() const; + LedgerKeySet getAllCreatedPersistentContractDataKeysWithoutSealing() const; // getNewestVersion has the basic exception safety guarantee. 
If it throws // an exception, then @@ -737,7 +618,7 @@ class LedgerTxnRoot::Impl mutable BestOffers mBestOffers; mutable uint64_t mPrefetchHits{0}; mutable uint64_t mPrefetchMisses{0}; - mutable std::shared_ptr + mutable std::shared_ptr mSearchableBucketListSnapshot{}; size_t mBulkLoadBatchSize; @@ -750,8 +631,6 @@ class LedgerTxnRoot::Impl void throwIfChild() const; - std::shared_ptr loadAccount(LedgerKey const& key) const; - std::shared_ptr loadData(LedgerKey const& key) const; std::shared_ptr loadOffer(LedgerKey const& key) const; std::vector loadAllOffers() const; std::deque::const_iterator @@ -767,55 +646,12 @@ class LedgerTxnRoot::Impl loadOffersByAccountAndAsset(AccountID const& accountID, Asset const& asset) const; std::vector loadOffers(StatementContext& prep) const; - std::vector loadInflationWinners(size_t maxWinners, - int64_t minBalance) const; - std::shared_ptr - loadTrustLine(LedgerKey const& key) const; - std::vector - loadPoolShareTrustLinesByAccountAndAsset(AccountID const& accountID, - Asset const& asset) const; - std::shared_ptr - loadClaimableBalance(LedgerKey const& key) const; - std::shared_ptr - loadLiquidityPool(LedgerKey const& key) const; - std::shared_ptr - loadContractData(LedgerKey const& key) const; - std::shared_ptr - loadContractCode(LedgerKey const& key) const; - std::shared_ptr - loadConfigSetting(LedgerKey const& key) const; - std::shared_ptr loadTTL(LedgerKey const& key) const; void bulkApply(BulkLedgerEntryChangeAccumulator& bleca, size_t bufferThreshold, LedgerTxnConsistency cons); - void bulkUpsertAccounts(std::vector const& entries); - void bulkDeleteAccounts(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertTrustLines(std::vector const& entries); - void bulkDeleteTrustLines(std::vector const& entries, - LedgerTxnConsistency cons); void bulkUpsertOffers(std::vector const& entries); void bulkDeleteOffers(std::vector const& entries, LedgerTxnConsistency cons); - void bulkUpsertAccountData(std::vector const& entries); - void bulkDeleteAccountData(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertClaimableBalance(std::vector const& entries); - void bulkDeleteClaimableBalance(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertLiquidityPool(std::vector const& entries); - void bulkDeleteLiquidityPool(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertContractData(std::vector const& entries); - void bulkDeleteContractData(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertContractCode(std::vector const& entries); - void bulkDeleteContractCode(std::vector const& entries, - LedgerTxnConsistency cons); - void bulkUpsertConfigSettings(std::vector const& entries); - void bulkUpsertTTL(std::vector const& entries); - void bulkDeleteTTL(std::vector const& entries, - LedgerTxnConsistency cons); static std::string tableFromLedgerEntryType(LedgerEntryType let); @@ -841,27 +677,8 @@ class LedgerTxnRoot::Impl BestOffersEntryPtr getFromBestOffers(Asset const& buying, Asset const& selling) const; - UnorderedMap> - bulkLoadAccounts(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadTrustLines(UnorderedSet const& keys) const; UnorderedMap> bulkLoadOffers(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadData(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadClaimableBalance(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadLiquidityPool(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadContractData(UnorderedSet 
const& keys) const; - UnorderedMap> - bulkLoadContractCode(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadConfigSettings(UnorderedSet const& keys) const; - UnorderedMap> - bulkLoadTTL(UnorderedSet const& keys) const; - std::deque::const_iterator loadNextBestOffersIntoCache(BestOffersEntryPtr cached, Asset const& buying, Asset const& selling); @@ -871,7 +688,8 @@ class LedgerTxnRoot::Impl bool areEntriesMissingInCacheForOffer(OfferEntry const& oe); - SearchableBucketListSnapshot& getSearchableBucketListSnapshot() const; + SearchableLiveBucketListSnapshot& + getSearchableLiveBucketListSnapshot() const; uint32_t prefetchInternal(UnorderedSet const& keys, LedgerKeyMeter* lkMeter = nullptr); @@ -892,26 +710,14 @@ class LedgerTxnRoot::Impl void commitChild(EntryIterator iter, LedgerTxnConsistency cons) noexcept; - // countObjects has the strong exception safety guarantee. - uint64_t countObjects(LedgerEntryType let) const; - uint64_t countObjects(LedgerEntryType let, - LedgerRange const& ledgers) const; + // countOffers has the strong exception safety guarantee. + uint64_t countOffers(LedgerRange const& ledgers) const; - // deleteObjectsModifiedOnOrAfterLedger has no exception safety guarantees. - void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const; + // deleteOffersModifiedOnOrAfterLedger has no exception safety guarantees. + void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const; - // dropAccounts, dropData, dropOffers, and dropTrustLines have no exception - // safety guarantees. - void dropAccounts(bool rebuild); - void dropData(bool rebuild); + // no exception safety guarantees. void dropOffers(bool rebuild); - void dropTrustLines(bool rebuild); - void dropClaimableBalances(bool rebuild); - void dropLiquidityPools(bool rebuild); - void dropContractData(bool rebuild); - void dropContractCode(bool rebuild); - void dropConfigSettings(bool rebuild); - void dropTTL(bool rebuild); #ifdef BUILD_TESTS void resetForFuzzer(); diff --git a/src/ledger/LedgerTxnLiquidityPoolSQL.cpp b/src/ledger/LedgerTxnLiquidityPoolSQL.cpp deleted file mode 100644 index ce8289b284..0000000000 --- a/src/ledger/LedgerTxnLiquidityPoolSQL.cpp +++ /dev/null @@ -1,419 +0,0 @@ -// Copyright 2020 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -static void -throwIfNotLiquidityPool(LedgerEntryType type) -{ - if (type != LIQUIDITY_POOL) - { - throw NonSociRelatedException("LedgerEntry is not a LIQUIDITY_POOL"); - } -} - -static std::string -getPrimaryKey(PoolID const& poolID) -{ - TrustLineAsset tla(ASSET_TYPE_POOL_SHARE); - tla.liquidityPoolID() = poolID; - return toOpaqueBase64(tla); -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadLiquidityPool(LedgerKey const& key) const -{ - auto poolAsset = getPrimaryKey(key.liquidityPool().liquidityPoolID); - - std::string liquidityPoolEntryStr; - - std::string sql = "SELECT ledgerentry " - "FROM liquiditypool " - "WHERE poolasset= :poolasset"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(liquidityPoolEntryStr)); - st.exchange(soci::use(poolAsset)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("liquiditypool"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, liquidityPoolEntryStr); - throwIfNotLiquidityPool(le.data.type()); - - return std::make_shared(std::move(le)); -} - -class BulkLoadLiquidityPoolOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mPoolAssets; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string liquidityPoolEntryStr; - - st.exchange(soci::into(liquidityPoolEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("liquiditypool"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, liquidityPoolEntryStr); - throwIfNotLiquidityPool(le.data.type()); - - st.fetch(); - } - return res; - } - - public: - BulkLoadLiquidityPoolOperation(Database& db, - UnorderedSet const& keys) - : mDb(db) - { - mPoolAssets.reserve(keys.size()); - for (auto const& k : keys) - { - throwIfNotLiquidityPool(k.type()); - mPoolAssets.emplace_back( - getPrimaryKey(k.liquidityPool().liquidityPoolID)); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector cstrPoolAssets; - cstrPoolAssets.reserve(mPoolAssets.size()); - for (size_t i = 0; i < mPoolAssets.size(); ++i) - { - cstrPoolAssets.emplace_back(mPoolAssets[i].c_str()); - } - - std::string sql = "WITH r AS (SELECT value FROM carray(?, ?, 'char*')) " - "SELECT ledgerentry " - "FROM liquiditypool " - "WHERE poolasset IN r"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, cstrPoolAssets.data(), "carray", 0); - sqlite3_bind_int(st, 2, static_cast(cstrPoolAssets.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strPoolAssets; - marshalToPGArray(pg->conn_, strPoolAssets, mPoolAssets); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - 
"SELECT ledgerentry " - "FROM liquiditypool " - "WHERE poolasset IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strPoolAssets)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadLiquidityPool( - UnorderedSet const& keys) const -{ - if (!keys.empty()) - { - BulkLoadLiquidityPoolOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class BulkDeleteLiquidityPoolOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - LedgerTxnConsistency mCons; - std::vector mPoolAssets; - - public: - BulkDeleteLiquidityPoolOperation(Database& db, LedgerTxnConsistency cons, - std::vector const& entries) - : mDb(db), mCons(cons) - { - mPoolAssets.reserve(entries.size()); - for (auto const& e : entries) - { - releaseAssert(!e.entryExists()); - throwIfNotLiquidityPool(e.key().ledgerKey().type()); - mPoolAssets.emplace_back(getPrimaryKey( - e.key().ledgerKey().liquidityPool().liquidityPoolID)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM liquiditypool WHERE poolasset = :id"; - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(mPoolAssets)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("liquiditypool"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mPoolAssets.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strPoolAssets; - marshalToPGArray(pg->conn_, strPoolAssets, mPoolAssets); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "DELETE FROM liquiditypool " - "WHERE poolasset IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strPoolAssets)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("liquiditypool"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mPoolAssets.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkDeleteLiquidityPool( - std::vector const& entries, LedgerTxnConsistency cons) -{ - BulkDeleteLiquidityPoolOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -class BulkUpsertLiquidityPoolOperation - : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mPoolAssets; - std::vector mAssetAs; - std::vector mAssetBs; - std::vector mLiquidityPoolEntries; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - throwIfNotLiquidityPool(entry.data.type()); - - auto const& lp = entry.data.liquidityPool(); - auto const& cp = lp.body.constantProduct(); - mPoolAssets.emplace_back(getPrimaryKey(lp.liquidityPoolID)); - mAssetAs.emplace_back(toOpaqueBase64(cp.params.assetA)); - mAssetBs.emplace_back(toOpaqueBase64(cp.params.assetB)); - mLiquidityPoolEntries.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - 
public: - BulkUpsertLiquidityPoolOperation( - Database& Db, std::vector const& entryIter) - : mDb(Db) - { - for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = - "INSERT INTO liquiditypool " - "(poolasset, asseta, assetb, ledgerentry, lastmodified) " - "VALUES " - "( :id, :v1, :v2, :v3, :v4 ) " - "ON CONFLICT (poolasset) DO UPDATE SET " - "asseta = excluded.asseta, " - "assetb = excluded.assetb, " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mPoolAssets)); - st.exchange(soci::use(mAssetAs)); - st.exchange(soci::use(mAssetBs)); - st.exchange(soci::use(mLiquidityPoolEntries)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("liquiditypool"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mPoolAssets.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strPoolAssets, strAssetAs, strAssetBs, - strLiquidityPoolEntry, strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strPoolAssets, mPoolAssets); - marshalToPGArray(conn, strAssetAs, mAssetAs); - marshalToPGArray(conn, strAssetBs, mAssetBs); - marshalToPGArray(conn, strLiquidityPoolEntry, mLiquidityPoolEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = - "WITH r AS " - "(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), " - "unnest(:v2::TEXT[]), unnest(:v3::TEXT[]), " - "unnest(:v4::INT[])) " - "INSERT INTO liquiditypool " - "(poolasset, asseta, assetb, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (poolasset) DO UPDATE SET " - "asseta = excluded.asseta, " - "assetb = excluded.assetb, " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strPoolAssets)); - st.exchange(soci::use(strAssetAs)); - st.exchange(soci::use(strAssetBs)); - st.exchange(soci::use(strLiquidityPoolEntry)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("liquiditypool"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mPoolAssets.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertLiquidityPool( - std::vector const& entries) -{ - BulkUpsertLiquidityPoolOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropLiquidityPools(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS liquiditypool;"; - - if (rebuild) - { - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - // The primary key is poolasset (the base-64 opaque TrustLineAsset - // containing the PoolID) instead of poolid (the base-64 opaque PoolID) - // so that we can perform the join in load pool share trust lines by - // account and asset. 
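The primary-key comment above is worth anchoring to the concrete query it enables. Because `poolasset` stores the base64 `TrustLineAsset` wrapping the PoolID, and pool-share trust lines store the same encoding in their `asset` column, the two tables join without decoding anything. The query, which reappears verbatim in the deleted LedgerTxnTrustLineSQL.cpp further down:

```cpp
// Pool-share trust lines for an account, restricted to pools touching
// a given asset; joinable only because poolasset == trustlines.asset.
char const* kPoolShareTrustLinesSql =
    "SELECT trustlines.ledgerentry "
    "FROM trustlines "
    "INNER JOIN liquiditypool "
    "ON trustlines.asset = liquiditypool.poolasset "
    "AND trustlines.accountid = :v1 "
    "AND (liquiditypool.asseta = :v2 OR liquiditypool.assetb = :v3)";
```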
- mApp.getDatabase().getSession() - << "CREATE TABLE liquiditypool (" - << "poolasset TEXT " << coll << " PRIMARY KEY, " - << "asseta TEXT " << coll << " NOT NULL, " - << "assetb TEXT " << coll << " NOT NULL, " - << "ledgerentry TEXT NOT NULL, " - << "lastmodified INT NOT NULL);"; - mApp.getDatabase().getSession() << "CREATE INDEX liquiditypoolasseta " - << "ON liquiditypool(asseta);"; - mApp.getDatabase().getSession() << "CREATE INDEX liquiditypoolassetb " - << "ON liquiditypool(assetb);"; - } -} -} diff --git a/src/ledger/LedgerTxnTTLSQL.cpp b/src/ledger/LedgerTxnTTLSQL.cpp deleted file mode 100644 index 363923a14d..0000000000 --- a/src/ledger/LedgerTxnTTLSQL.cpp +++ /dev/null @@ -1,381 +0,0 @@ - -// Copyright 2023 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/types.h" - -namespace stellar -{ - -static void -throwIfNotTTL(LedgerEntryType type) -{ - if (type != TTL) - { - throw NonSociRelatedException("LedgerEntry is not TTL"); - } -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadTTL(LedgerKey const& key) const -{ - auto keyHash = toOpaqueBase64(key.ttl().keyHash); - std::string ttlEntryStr; - - std::string sql = "SELECT ledgerentry " - "FROM ttl " - "WHERE keyhash = :keyHash"; - auto prep = mApp.getDatabase().getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::into(ttlEntryStr)); - st.exchange(soci::use(keyHash)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("ttl"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, ttlEntryStr); - throwIfNotTTL(le.data.type()); - - return std::make_shared(std::move(le)); -} -class BulkLoadTTLOperation - : public DatabaseTypeSpecificOperation> -{ - Database& mDb; - std::vector mKeyHashes; - - std::vector - executeAndFetch(soci::statement& st) - { - std::string ttlEntryStr; - - st.exchange(soci::into(ttlEntryStr)); - st.define_and_bind(); - { - auto timer = mDb.getSelectTimer("ttl"); - st.execute(true); - } - - std::vector res; - while (st.got_data()) - { - res.emplace_back(); - auto& le = res.back(); - - fromOpaqueBase64(le, ttlEntryStr); - throwIfNotTTL(le.data.type()); - - st.fetch(); - } - return res; - } - - public: - BulkLoadTTLOperation(Database& db, UnorderedSet const& keys) - : mDb(db) - { - mKeyHashes.reserve(keys.size()); - for (auto const& k : keys) - { - throwIfNotTTL(k.type()); - mKeyHashes.emplace_back(toOpaqueBase64(k.ttl().keyHash)); - } - } - - std::vector - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - std::vector cStrKeyHashes; - cStrKeyHashes.reserve(mKeyHashes.size()); - for (auto const& h : mKeyHashes) - { - cStrKeyHashes.emplace_back(h.c_str()); - } - std::string sql = "SELECT ledgerentry " - "FROM ttl " - "WHERE keyhash IN carray(?, ?, 'char*')"; - - auto prep = mDb.getPreparedStatement(sql); - auto be = prep.statement().get_backend(); - if (be == nullptr) - { - throw std::runtime_error("no sql backend"); - } - auto sqliteStatement = - dynamic_cast(be); - auto st = sqliteStatement->stmt_; - - sqlite3_reset(st); - sqlite3_bind_pointer(st, 1, (void*)cStrKeyHashes.data(), "carray", 0); - sqlite3_bind_int(st, 2, 
static_cast(cStrKeyHashes.size())); - return executeAndFetch(prep.statement()); - } - -#ifdef USE_POSTGRES - std::vector - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strKeyHashes; - marshalToPGArray(pg->conn_, strKeyHashes, mKeyHashes); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "SELECT ledgerentry " - "FROM ttl " - "WHERE (keyHash) IN (SELECT * from r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strKeyHashes)); - return executeAndFetch(st); - } -#endif -}; - -UnorderedMap> -LedgerTxnRoot::Impl::bulkLoadTTL(UnorderedSet const& keys) const -{ - if (!keys.empty()) - { - BulkLoadTTLOperation op(mApp.getDatabase(), keys); - return populateLoadedEntries( - keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op)); - } - else - { - return {}; - } -} - -class BulkDeleteTTLOperation : public DatabaseTypeSpecificOperation -{ - Database& mDb; - LedgerTxnConsistency mCons; - std::vector mKeyHashes; - - public: - BulkDeleteTTLOperation(Database& db, LedgerTxnConsistency cons, - std::vector const& entries) - : mDb(db), mCons(cons) - { - mKeyHashes.reserve(entries.size()); - for (auto const& e : entries) - { - releaseAssertOrThrow(!e.entryExists()); - throwIfNotTTL(e.key().ledgerKey().type()); - mKeyHashes.emplace_back( - toOpaqueBase64(e.key().ledgerKey().ttl().keyHash)); - } - } - - void - doSociGenericOperation() - { - std::string sql = "DELETE FROM ttl WHERE keyhash = :id"; - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(mKeyHashes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("ttl"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mKeyHashes.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strKeyHashes; - marshalToPGArray(pg->conn_, strKeyHashes, mKeyHashes); - - std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) " - "DELETE FROM ttl " - "WHERE keyHash IN (SELECT * FROM r)"; - - auto prep = mDb.getPreparedStatement(sql); - auto& st = prep.statement(); - st.exchange(soci::use(strKeyHashes)); - st.define_and_bind(); - { - auto timer = mDb.getDeleteTimer("ttl"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mKeyHashes.size() && - mCons == LedgerTxnConsistency::EXACT) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkDeleteTTL(std::vector const& entries, - LedgerTxnConsistency cons) -{ - BulkDeleteTTLOperation op(mApp.getDatabase(), cons, entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -class BulkUpsertTTLOperation : public DatabaseTypeSpecificOperation -{ - Database& mDb; - std::vector mKeyHashes; - std::vector mTTLEntries; - std::vector mLastModifieds; - - void - accumulateEntry(LedgerEntry const& entry) - { - throwIfNotTTL(entry.data.type()); - - mKeyHashes.emplace_back(toOpaqueBase64(entry.data.ttl().keyHash)); - mTTLEntries.emplace_back(toOpaqueBase64(entry)); - mLastModifieds.emplace_back( - unsignedToSigned(entry.lastModifiedLedgerSeq)); - } - - public: - BulkUpsertTTLOperation(Database& Db, - std::vector const& entryIter) - : mDb(Db) - { - 
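For context on the `keyhash` column used above: a TTL entry is keyed by the SHA-256 hash of the XDR-encoded LedgerKey of the contract data or contract code entry whose lifetime it tracks (stellar-core's `getTTLKey` helper computes this). A sketch of the derivation, assuming the codebase's `sha256` and xdrpp's `xdr_to_opaque`:

```cpp
// Derive the TTL key for a Soroban entry: keyHash = SHA-256 of the
// XDR encoding of the extended entry's LedgerKey. Sketch of getTTLKey.
LedgerKey
ttlKeyForSketch(LedgerKey const& sorobanKey)
{
    LedgerKey k;
    k.type(TTL);
    k.ttl().keyHash = sha256(xdr::xdr_to_opaque(sorobanKey));
    return k;
}
```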
for (auto const& e : entryIter) - { - releaseAssert(e.entryExists()); - accumulateEntry(e.entry().ledgerEntry()); - } - } - - void - doSociGenericOperation() - { - std::string sql = "INSERT INTO ttl " - "(keyhash, ledgerentry, lastmodified) " - "VALUES " - "( :keyHash, :v1, :v2 ) " - "ON CONFLICT (keyhash) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(mKeyHashes)); - st.exchange(soci::use(mTTLEntries)); - st.exchange(soci::use(mLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("ttl"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mKeyHashes.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } - - void - doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override - { - doSociGenericOperation(); - } - -#ifdef USE_POSTGRES - void - doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override - { - std::string strKeyHashes, strTTLEntries, strLastModifieds; - - PGconn* conn = pg->conn_; - marshalToPGArray(conn, strKeyHashes, mKeyHashes); - marshalToPGArray(conn, strTTLEntries, mTTLEntries); - marshalToPGArray(conn, strLastModifieds, mLastModifieds); - - std::string sql = "WITH r AS " - "(SELECT unnest(:v1::TEXT[]), " - "unnest(:v2::TEXT[]), unnest(:v3::INT[])) " - "INSERT INTO ttl " - "(keyHash, ledgerentry, lastmodified) " - "SELECT * FROM r " - "ON CONFLICT (keyhash) DO UPDATE SET " - "ledgerentry = excluded.ledgerentry, " - "lastmodified = excluded.lastmodified"; - - auto prep = mDb.getPreparedStatement(sql); - soci::statement& st = prep.statement(); - st.exchange(soci::use(strKeyHashes)); - st.exchange(soci::use(strTTLEntries)); - st.exchange(soci::use(strLastModifieds)); - st.define_and_bind(); - { - auto timer = mDb.getUpsertTimer("ttl"); - st.execute(true); - } - if (static_cast(st.get_affected_rows()) != mKeyHashes.size()) - { - throw std::runtime_error("Could not update data in SQL"); - } - } -#endif -}; - -void -LedgerTxnRoot::Impl::bulkUpsertTTL(std::vector const& entries) -{ - BulkUpsertTTLOperation op(mApp.getDatabase(), entries); - mApp.getDatabase().doDatabaseTypeSpecificOperation(op); -} - -void -LedgerTxnRoot::Impl::dropTTL(bool rebuild) -{ - throwIfChild(); - mEntryCache.clear(); - mBestOffers.clear(); - - std::string coll = mApp.getDatabase().getSimpleCollationClause(); - - mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS ttl;"; - - if (rebuild) - { - mApp.getDatabase().getSession() - << "CREATE TABLE ttl (" - << "keyhash TEXT " << coll << " NOT NULL, " - << "ledgerentry TEXT " << coll << " NOT NULL, " - << "lastmodified INT NOT NULL, " - << "PRIMARY KEY (keyhash));"; - if (!mApp.getDatabase().isSqlite()) - { - mApp.getDatabase().getSession() << "ALTER TABLE ttl " - << "ALTER COLUMN keyhash " - << "TYPE TEXT COLLATE \"C\";"; - } - } -} - -} \ No newline at end of file diff --git a/src/ledger/LedgerTxnTrustLineSQL.cpp b/src/ledger/LedgerTxnTrustLineSQL.cpp deleted file mode 100644 index 78631cd25a..0000000000 --- a/src/ledger/LedgerTxnTrustLineSQL.cpp +++ /dev/null @@ -1,521 +0,0 @@ -// Copyright 2017 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. 
See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "crypto/KeyUtils.h" -#include "crypto/SecretKey.h" -#include "database/Database.h" -#include "database/DatabaseTypeSpecificOperation.h" -#include "ledger/LedgerTxnImpl.h" -#include "ledger/LedgerTypeUtils.h" -#include "ledger/NonSociRelatedException.h" -#include "main/Application.h" -#include "util/GlobalChecks.h" -#include "util/Logging.h" -#include "util/XDROperators.h" -#include "util/types.h" -#include - -namespace stellar -{ - -void -validateTrustLineKey(uint32_t ledgerVersion, LedgerKey const& key) -{ - auto const& asset = key.trustLine().asset; - - if (!isAssetValid(asset, ledgerVersion)) - { - throw NonSociRelatedException("TrustLine asset is invalid"); - } - else if (asset.type() == ASSET_TYPE_NATIVE) - { - throw NonSociRelatedException("XLM TrustLine?"); - } - else if (isIssuer(key.trustLine().accountID, asset)) - { - throw NonSociRelatedException("TrustLine accountID is issuer"); - } -} - -std::shared_ptr -LedgerTxnRoot::Impl::loadTrustLine(LedgerKey const& key) const -{ - ZoneScoped; - - validateTrustLineKey(mHeader->ledgerVersion, key); - - std::string accountIDStr = KeyUtils::toStrKey(key.trustLine().accountID); - auto asset = toOpaqueBase64(key.trustLine().asset); - - std::string trustLineEntryStr; - - auto prep = mApp.getDatabase().getPreparedStatement( - "SELECT ledgerentry " - " FROM trustlines " - "WHERE accountid= :id AND asset= :asset"); - auto& st = prep.statement(); - st.exchange(soci::into(trustLineEntryStr)); - st.exchange(soci::use(accountIDStr)); - st.exchange(soci::use(asset)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("trust"); - st.execute(true); - } - if (!st.got_data()) - { - return nullptr; - } - - LedgerEntry le; - fromOpaqueBase64(le, trustLineEntryStr); - if (le.data.type() != TRUSTLINE) - { - throw NonSociRelatedException("Loaded non-trustline entry"); - } - - return std::make_shared(std::move(le)); -} - -std::vector -LedgerTxnRoot::Impl::loadPoolShareTrustLinesByAccountAndAsset( - AccountID const& accountID, Asset const& asset) const -{ - ZoneScoped; - - std::string accountIDStr = KeyUtils::toStrKey(accountID); - auto assetStr = toOpaqueBase64(asset); - - std::string trustLineEntryStr; - - auto prep = mApp.getDatabase().getPreparedStatement( - "SELECT trustlines.ledgerentry " - "FROM trustlines " - "INNER JOIN liquiditypool " - "ON trustlines.asset = liquiditypool.poolasset " - "AND trustlines.accountid = :v1 " - "AND (liquiditypool.asseta = :v2 OR liquiditypool.assetb = :v3)"); - auto& st = prep.statement(); - st.exchange(soci::into(trustLineEntryStr)); - st.exchange(soci::use(accountIDStr)); - st.exchange(soci::use(assetStr)); - st.exchange(soci::use(assetStr)); - st.define_and_bind(); - { - auto timer = mApp.getDatabase().getSelectTimer("trust"); - st.execute(true); - } - - std::vector trustLines; - while (st.got_data()) - { - trustLines.emplace_back(); - fromOpaqueBase64(trustLines.back(), trustLineEntryStr); - if (trustLines.back().data.type() != TRUSTLINE) - { - throw NonSociRelatedException("Loaded non-trustline entry"); - } - st.fetch(); - } - return trustLines; -} - -class BulkUpsertTrustLinesOperation : public DatabaseTypeSpecificOperation -{ - Database& mDB; - std::vector mAccountIDs; - std::vector mAssets; - std::vector mTrustLineEntries; - std::vector mLastModifieds; - - public: - BulkUpsertTrustLinesOperation(Database& DB, - std::vector const& entries, - uint32_t ledgerVersion) - : 
-    {
-        mAccountIDs.reserve(entries.size());
-        mAssets.reserve(entries.size());
-        mTrustLineEntries.reserve(entries.size());
-        mLastModifieds.reserve(entries.size());
-
-        for (auto const& e : entries)
-        {
-            releaseAssert(e.entryExists());
-            releaseAssert(e.entry().type() ==
-                          InternalLedgerEntryType::LEDGER_ENTRY);
-            auto const& le = e.entry().ledgerEntry();
-            releaseAssert(le.data.type() == TRUSTLINE);
-
-            auto const& tl = le.data.trustLine();
-
-            validateTrustLineKey(ledgerVersion, e.key().ledgerKey());
-
-            mAccountIDs.emplace_back(KeyUtils::toStrKey(tl.accountID));
-            mAssets.emplace_back(toOpaqueBase64(tl.asset));
-            mTrustLineEntries.emplace_back(toOpaqueBase64(le));
-            mLastModifieds.emplace_back(
-                unsignedToSigned(le.lastModifiedLedgerSeq));
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "INSERT INTO trustlines ( "
-                          "accountid, asset, ledgerentry, lastmodified)"
-                          "VALUES ( "
-                          ":id, :v1, :v2, :v3 "
-                          ") ON CONFLICT (accountid, asset) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mAccountIDs));
-        st.exchange(soci::use(mAssets));
-        st.exchange(soci::use(mTrustLineEntries));
-        st.exchange(soci::use(mLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getUpsertTimer("trustline");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        PGconn* conn = pg->conn_;
-
-        std::string strAccountIDs, strAssets, strTrustLineEntries,
-            strLastModifieds;
-
-        marshalToPGArray(conn, strAccountIDs, mAccountIDs);
-        marshalToPGArray(conn, strAssets, mAssets);
-        marshalToPGArray(conn, strTrustLineEntries, mTrustLineEntries);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-
-        std::string sql = "WITH r AS (SELECT "
-                          "unnest(:ids::TEXT[]), "
-                          "unnest(:v1::TEXT[]), "
-                          "unnest(:v2::TEXT[]), "
-                          "unnest(:v3::INT[])) "
-                          "INSERT INTO trustlines ( "
-                          "accountid, asset, ledgerEntry, lastmodified"
-                          ") SELECT * from r "
-                          "ON CONFLICT (accountid, asset) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strAssets));
-        st.exchange(soci::use(strTrustLineEntries));
-        st.exchange(soci::use(strLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getUpsertTimer("trustline");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-class BulkDeleteTrustLinesOperation : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDB;
-    LedgerTxnConsistency mCons;
-    std::vector<std::string> mAccountIDs;
-    std::vector<std::string> mAssets;
-
-  public:
-    BulkDeleteTrustLinesOperation(Database& DB, LedgerTxnConsistency cons,
-                                  std::vector<EntryIterator> const& entries,
-                                  uint32_t ledgerVersion)
-        : mDB(DB), mCons(cons)
-    {
-        mAccountIDs.reserve(entries.size());
-        mAssets.reserve(entries.size());
-        for (auto const& e : entries)
-        {
-            releaseAssert(!e.entryExists());
-            releaseAssert(e.key().type() ==
-                          InternalLedgerEntryType::LEDGER_ENTRY);
-            releaseAssert(e.key().ledgerKey().type() == TRUSTLINE);
-            auto const& tl = e.key().ledgerKey().trustLine();
-
-            validateTrustLineKey(ledgerVersion, e.key().ledgerKey());
-
-            mAccountIDs.emplace_back(KeyUtils::toStrKey(tl.accountID));
-            mAssets.emplace_back(toOpaqueBase64(tl.asset));
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "DELETE FROM trustlines WHERE accountid = :id "
-                          "AND asset = :v1";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mAccountIDs));
-        st.exchange(soci::use(mAssets));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getDeleteTimer("trustline");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strAccountIDs, strAssets;
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strAccountIDs, mAccountIDs);
-        marshalToPGArray(conn, strAssets, mAssets);
-        std::string sql = "WITH r AS (SELECT "
-                          "unnest(:ids::TEXT[]), "
-                          "unnest(:v1::TEXT[])"
-                          ") "
-                          "DELETE FROM trustlines WHERE "
-                          "(accountid, asset) IN (SELECT * FROM r)";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strAssets));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getDeleteTimer("trustline");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkUpsertTrustLines(
-    std::vector<EntryIterator> const& entries)
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(entries.size()));
-    BulkUpsertTrustLinesOperation op(mApp.getDatabase(), entries,
-                                     mHeader->ledgerVersion);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::bulkDeleteTrustLines(
-    std::vector<EntryIterator> const& entries, LedgerTxnConsistency cons)
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(entries.size()));
-    BulkDeleteTrustLinesOperation op(mApp.getDatabase(), cons, entries,
-                                     mHeader->ledgerVersion);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::dropTrustLines(bool rebuild)
-{
-    throwIfChild();
-    mEntryCache.clear();
-    mBestOffers.clear();
-
-    mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS trustlines;";
-
-    if (rebuild)
-    {
-        std::string coll = mApp.getDatabase().getSimpleCollationClause();
-        mApp.getDatabase().getSession()
-            << "CREATE TABLE trustlines"
-            << "("
-            << "accountid VARCHAR(56) " << coll << " NOT NULL,"
-            << "asset TEXT " << coll << " NOT NULL,"
-            << "ledgerentry TEXT NOT NULL,"
-            << "lastmodified INT NOT NULL,"
-            << "PRIMARY KEY (accountid, asset));";
-    }
-}
-
-class BulkLoadTrustLinesOperation
-    : public DatabaseTypeSpecificOperation<std::vector<LedgerEntry>>
-{
-    Database& mDb;
-    std::vector<std::string> mAccountIDs;
-    std::vector<std::string> mAssets;
-
-    std::vector<LedgerEntry>
-    executeAndFetch(soci::statement& st)
-    {
-        std::string accountID, asset, trustLineEntryStr;
-
-        st.exchange(soci::into(accountID));
-        st.exchange(soci::into(asset));
-        st.exchange(soci::into(trustLineEntryStr));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getSelectTimer("trust");
-            st.execute(true);
-        }
-
-        std::vector<LedgerEntry> res;
-        while (st.got_data())
-        {
-            res.emplace_back();
-            auto& le = res.back();
-
-            fromOpaqueBase64(le, trustLineEntryStr);
-            releaseAssert(le.data.type() == TRUSTLINE);
-            releaseAssert(le.data.trustLine().asset.type() !=
-                          ASSET_TYPE_NATIVE);
-
-            st.fetch();
-        }
-        return res;
-    }
-
-  public:
-    BulkLoadTrustLinesOperation(Database& db,
-                                UnorderedSet<LedgerKey> const& keys)
-        : mDb(db)
-    {
-        mAccountIDs.reserve(keys.size());
-        mAssets.reserve(keys.size());
-
-        for (auto const& k : keys)
-        {
-            releaseAssert(k.type() == TRUSTLINE);
-            if (k.trustLine().asset.type() == ASSET_TYPE_NATIVE)
-            {
-                throw NonSociRelatedException(
-                    "TrustLine asset can't be native");
-            }
-
-            mAccountIDs.emplace_back(
-                KeyUtils::toStrKey(k.trustLine().accountID));
-            mAssets.emplace_back(toOpaqueBase64(k.trustLine().asset));
-        }
-    }
-
-    virtual std::vector<LedgerEntry>
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        releaseAssert(mAccountIDs.size() == mAssets.size());
-
-        std::vector<char const*> cstrAccountIDs;
-        std::vector<char const*> cstrAssets;
-        cstrAccountIDs.reserve(mAccountIDs.size());
-        cstrAssets.reserve(mAssets.size());
-        for (size_t i = 0; i < mAccountIDs.size(); ++i)
-        {
-            cstrAccountIDs.emplace_back(mAccountIDs[i].c_str());
-            cstrAssets.emplace_back(mAssets[i].c_str());
-        }
-
-        std::string sqlJoin = "SELECT x.value, y.value FROM "
-                              "(SELECT rowid, value FROM carray(?, ?, "
-                              "'char*') ORDER BY rowid) "
-                              "AS x "
-                              "INNER JOIN (SELECT rowid, value FROM "
-                              "carray(?, ?, 'char*') ORDER "
-                              "BY rowid) AS y ON x.rowid = y.rowid ";
-        std::string sql = "WITH r AS (" + sqlJoin +
-                          ") SELECT accountid, asset, ledgerentry "
-                          "FROM trustlines WHERE (accountid, asset) IN r";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto be = prep.statement().get_backend();
-        if (be == nullptr)
-        {
-            throw std::runtime_error("no sql backend");
-        }
-        auto sqliteStatement =
-            dynamic_cast<soci::sqlite3_statement_backend*>(be);
-        auto st = sqliteStatement->stmt_;
-
-        sqlite3_reset(st);
-        sqlite3_bind_pointer(st, 1, cstrAccountIDs.data(), "carray", 0);
-        sqlite3_bind_int(st, 2, static_cast<int>(cstrAccountIDs.size()));
-        sqlite3_bind_pointer(st, 3, cstrAssets.data(), "carray", 0);
-        sqlite3_bind_int(st, 4, static_cast<int>(cstrAssets.size()));
-        return executeAndFetch(prep.statement());
-    }
-
-#ifdef USE_POSTGRES
-    virtual std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        releaseAssert(mAccountIDs.size() == mAssets.size());
-
-        std::string strAccountIDs;
-        std::string strAssets;
-        marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs);
-        marshalToPGArray(pg->conn_, strAssets, mAssets);
-
-        auto prep = mDb.getPreparedStatement(
-            "WITH r AS (SELECT unnest(:v1::TEXT[]), "
-            "unnest(:v2::TEXT[])) SELECT accountid, asset, "
-            "ledgerentry "
-            " FROM trustlines "
-            "WHERE (accountid, asset) IN (SELECT * "
-            "FROM r)");
-        auto& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strAssets));
-        return executeAndFetch(st);
-    }
-#endif
-};
-
-UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-LedgerTxnRoot::Impl::bulkLoadTrustLines(
-    UnorderedSet<LedgerKey> const& keys) const
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(keys.size()));
-    if (!keys.empty())
-    {
-        BulkLoadTrustLinesOperation op(mApp.getDatabase(), keys);
-        return populateLoadedEntries(
-            keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op));
-    }
-    else
-    {
-        return {};
-    }
-}
-}
diff --git a/src/ledger/NetworkConfig.cpp b/src/ledger/NetworkConfig.cpp
index e1ae2e43a0..9922343e70 100644
--- a/src/ledger/NetworkConfig.cpp
+++
b/src/ledger/NetworkConfig.cpp @@ -920,7 +920,7 @@ initialBucketListSizeWindow(Application& app) // copies of the current BL size. If the bucketlist is disabled for // testing, just fill with ones to avoid triggering asserts. auto blSize = app.getConfig().MODE_ENABLES_BUCKETLIST - ? app.getBucketManager().getBucketList().getSize() + ? app.getBucketManager().getLiveBucketList().getSize() : 1; for (uint64_t i = 0; i < InitialSorobanNetworkConfig::BUCKET_LIST_SIZE_WINDOW_SAMPLE_SIZE; @@ -1046,7 +1046,7 @@ SorobanNetworkConfig::isValidConfigSettingEntry(ConfigSettingEntry const& cfg, cfg.stateArchivalSettings().startingEvictionScanLevel >= MinimumSorobanNetworkConfig::STARTING_EVICTION_LEVEL && cfg.stateArchivalSettings().startingEvictionScanLevel < - BucketList::kNumLevels && + LiveBucketList::kNumLevels && cfg.stateArchivalSettings().bucketListWindowSamplePeriod >= MinimumSorobanNetworkConfig::BUCKETLIST_WINDOW_SAMPLE_PERIOD; @@ -1698,7 +1698,7 @@ SorobanNetworkConfig::maybeSnapshotBucketListSize(uint32_t currLedger, // Update in memory snapshots mBucketListSizeSnapshots.pop_front(); mBucketListSizeSnapshots.push_back( - app.getBucketManager().getBucketList().getSize()); + app.getBucketManager().getLiveBucketList().getSize()); writeBucketListSizeWindow(ltx); updateBucketListSizeAverage(); @@ -1861,13 +1861,12 @@ SorobanNetworkConfig::writeAllSettings(AbstractLedgerTxn& ltx, // If testing with BucketListDB, we need to commit directly to the // BucketList - if (app.getConfig().isUsingBucketListDB()) + if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER) { auto lcl = app.getLedgerManager().getLastClosedLedgerHeader(); lcl.header.ledgerSeq += 1; - BucketTestUtils::addBatchAndUpdateSnapshot( - app.getBucketManager().getBucketList(), app, lcl.header, {}, - entries, {}); + BucketTestUtils::addLiveBatchAndUpdateSnapshot(app, lcl.header, {}, + entries, {}); } } #endif diff --git a/src/ledger/test/LedgerCloseMetaStreamTests.cpp b/src/ledger/test/LedgerCloseMetaStreamTests.cpp index efdff716d5..3852fe2fe5 100644 --- a/src/ledger/test/LedgerCloseMetaStreamTests.cpp +++ b/src/ledger/test/LedgerCloseMetaStreamTests.cpp @@ -91,9 +91,8 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE", Config cfg4 = getTestConfig(4); Config cfg5 = getTestConfig( 5, - Config:: - TESTDB_IN_MEMORY_NO_OFFERS); // needed by - // EXPERIMENTAL_PRECAUTION_DELAY_META + Config::TESTDB_IN_MEMORY); // needed by + // EXPERIMENTAL_PRECAUTION_DELAY_META // Step 2: open writable files and pass them to configs 4 and 5 // (watchers). @@ -240,111 +239,6 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE", std::vector(lcms.begin(), lcms.end() - 1)); } -TEST_CASE("LedgerCloseMetaStream file descriptor - REPLAY_IN_MEMORY", - "[ledgerclosemetastreamreplay]") -{ - // Step 1: generate some history for replay. 
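The removed REPLAY_IN_MEMORY test below (like its LIVE_NODE sibling above) relies on the `fd:N` form of `METADATA_OUTPUT_STREAM`, where stellar-core writes framed LedgerCloseMeta XDR to an already-open file descriptor. As a stand-alone sketch of that convention (the helper name is ours, not stellar-core API; error handling elided):

```cpp
// Open a file and build the "fd:N" METADATA_OUTPUT_STREAM value that tells
// stellar-core to stream meta to the already-open descriptor N. This mirrors
// the ::open + fmt::format pattern used in the test below; POSIX-only.
#include <fcntl.h>
#include <fmt/format.h>
#include <string>

std::string
makeMetaStreamConfig(std::string const& path)
{
    int fd = ::open(path.c_str(), O_CREAT | O_WRONLY, 0644);
    // A real caller must check fd != -1 before handing it to core.
    return fmt::format(FMT_STRING("fd:{}"), fd);
}
```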
- using namespace stellar::historytestutils; - TmpDirHistoryConfigurator tCfg; - { - Config genCfg = getTestConfig(0, Config::TESTDB_DEFAULT); - genCfg.MANUAL_CLOSE = false; - VirtualClock genClock; - genCfg = tCfg.configure(genCfg, true); - auto genApp = createTestApplication(genClock, genCfg); - auto& genHam = genApp->getHistoryArchiveManager(); - genHam.initializeHistoryArchive(tCfg.getArchiveDirName()); - for (size_t i = 0; i < 100; ++i) - { - genClock.crank(false); - } - auto& genHm = genApp->getHistoryManager(); - while (genHm.getPublishSuccessCount() < 5) - { - genClock.crank(true); - } - while (genClock.cancelAllEvents() || - genApp->getProcessManager().getNumRunningProcesses() > 0) - { - genClock.crank(false); - } - } - - // Step 2: open a writable file descriptor. - TmpDirManager tdm(std::string("streamtmp-") + binToHex(randomBytes(8))); - TmpDir td = tdm.tmpDir("streams"); - std::string metaPath = td.getName() + "/stream.xdr"; - auto cfg1 = getTestConfig(1); -#ifdef _WIN32 - cfg1.METADATA_OUTPUT_STREAM = metaPath; -#else - int fd = ::open(metaPath.c_str(), O_CREAT | O_WRONLY, 0644); - REQUIRE(fd != -1); - cfg1.METADATA_OUTPUT_STREAM = fmt::format(FMT_STRING("fd:{}"), fd); -#endif - - bool const delayMeta = GENERATE(true, false); - - // Step 3: pass it to an application and have it catch up to the generated - // history, streaming ledgerCloseMeta to the file descriptor. - Hash hash; - { - auto cfg = tCfg.configure(cfg1, false); - cfg.NODE_IS_VALIDATOR = false; - cfg.FORCE_SCP = false; - cfg.RUN_STANDALONE = true; - cfg.setInMemoryMode(); - cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta; - VirtualClock clock; - auto app = createTestApplication(clock, cfg, /*newdb=*/false); - - CatchupConfiguration cc{CatchupConfiguration::CURRENT, - std::numeric_limits::max(), - CatchupConfiguration::Mode::OFFLINE_COMPLETE}; - Json::Value catchupInfo; - auto& ham = app->getHistoryArchiveManager(); - auto& lm = app->getLedgerManager(); - auto archive = ham.selectRandomReadableHistoryArchive(); - int res = catchup(app, cc, catchupInfo, archive); - REQUIRE(res == 0); - hash = lm.getLastClosedLedgerHeader().hash; - while (clock.cancelAllEvents() || - app->getProcessManager().getNumRunningProcesses() > 0) - { - clock.crank(false); - } - } - - // Step 4: reopen the file as an XDR stream and read back the LCMs - // and check they have the expected content. - // - // The EXPERIMENTAL_PRECAUTION_DELAY_META case should still have streamed - // the latest meta, because catchup should have validated that ledger's hash - // by validating a chain of hashes back from one obtained from consensus. 
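The one-ledger delay described in the comment above can be pictured as a single-slot buffer: meta for ledger `N` is held back until ledger `N+1` closes, so a node never emits meta for a ledger whose hash has not yet been chained to by its successor. This is an illustrative sketch of that behavior, not stellar-core's implementation:

```cpp
// Minimal sketch of EXPERIMENTAL_PRECAUTION_DELAY_META's buffering: the
// meta for the previously closed ledger is emitted only once the next
// ledger closes. Meta is a placeholder for the real LedgerCloseMeta type.
#include <functional>
#include <optional>
#include <utility>

template <typename Meta>
class DelayedMetaEmitter
{
    std::optional<Meta> mPending; // meta for the last closed ledger
    std::function<void(Meta const&)> mEmit;

  public:
    explicit DelayedMetaEmitter(std::function<void(Meta const&)> emit)
        : mEmit(std::move(emit))
    {
    }

    void
    onLedgerClose(Meta m)
    {
        if (mPending)
        {
            mEmit(*mPending); // ledger N's meta goes out as N+1 closes
        }
        mPending = std::move(m);
    }
};
```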
- XDRInputFileStream stream; - stream.open(metaPath); - LedgerCloseMeta lcm; - size_t nLcm = 1; - while (stream && stream.readOne(lcm)) - { - ++nLcm; - } - // 5 checkpoints is ledger 0x13f - REQUIRE(nLcm == 0x13f); - if (lcm.v() == 0) - { - REQUIRE(lcm.v0().ledgerHeader.hash == hash); - } - else if (lcm.v() == 1) - { - REQUIRE(lcm.v1().ledgerHeader.hash == hash); - } - else - { - REQUIRE(false); - } -} - TEST_CASE("EXPERIMENTAL_PRECAUTION_DELAY_META configuration", "[ledgerclosemetastreamlive][ledgerclosemetastreamreplay]") { @@ -356,49 +250,9 @@ TEST_CASE("EXPERIMENTAL_PRECAUTION_DELAY_META configuration", { cfg.METADATA_OUTPUT_STREAM = ""; auto const delayMeta = GENERATE(false, true); - auto const inMemory = GENERATE(false, true); cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta; - if (inMemory) - { - cfg.setInMemoryMode(); - } REQUIRE_NOTHROW(createTestApplication(clock, cfg)); } - - SECTION("EXPERIMENTAL_PRECAUTION_DELAY_META together with " - "METADATA_OUTPUT_STREAM requires --in-memory") - { - TmpDirManager tdm(std::string("streamtmp-") + binToHex(randomBytes(8))); - TmpDir td = tdm.tmpDir("streams"); - std::string metaPath = td.getName() + "/stream.xdr"; - std::string metaStream; - -#ifdef _WIN32 - metaStream = metaPath; -#else - int fd = ::open(metaPath.c_str(), O_CREAT | O_WRONLY, 0644); - REQUIRE(fd != -1); - metaStream = fmt::format(FMT_STRING("fd:{}"), fd); -#endif - - cfg.METADATA_OUTPUT_STREAM = metaStream; - auto const delayMeta = GENERATE(false, true); - auto const inMemory = GENERATE(false, true); - cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta; - if (inMemory) - { - cfg.setInMemoryMode(); - } - if (delayMeta && !inMemory) - { - REQUIRE_THROWS_AS(createTestApplication(clock, cfg), - std::invalid_argument); - } - else - { - REQUIRE_NOTHROW(createTestApplication(clock, cfg)); - } - } } TEST_CASE("METADATA_DEBUG_LEDGERS works", "[metadebug]") @@ -482,7 +336,8 @@ TEST_CASE("METADATA_DEBUG_LEDGERS works", "[metadebug]") } } -TEST_CASE_VERSIONS("meta stream contains reasonable meta", "[ledgerclosemeta]") +TEST_CASE_VERSIONS("meta stream contains reasonable meta", + "[ledgerclosemeta][archival]") { auto test = [&](Config cfg, bool isSoroban) { using namespace stellar::txtest; diff --git a/src/ledger/test/LedgerTestUtils.cpp b/src/ledger/test/LedgerTestUtils.cpp index 6835e3445f..930b7334da 100644 --- a/src/ledger/test/LedgerTestUtils.cpp +++ b/src/ledger/test/LedgerTestUtils.cpp @@ -15,6 +15,7 @@ #include "util/types.h" #include "xdr/Stellar-contract.h" #include "xdr/Stellar-ledger-entries.h" +#include "xdr/Stellar-types.h" #include #include #include @@ -741,6 +742,29 @@ generateValidLedgerEntryWithTypes( } } +std::vector +generateValidUniqueLedgerKeysWithTypes( + std::unordered_set const& types, size_t n, + UnorderedSet& seenKeys) +{ + std::vector res; + res.reserve(n); + while (res.size() < n) + { + + auto entry = generateValidLedgerEntryWithTypes(types); + auto key = LedgerEntryKey(entry); + if (seenKeys.find(key) != seenKeys.end()) + { + continue; + } + + seenKeys.insert(key); + res.emplace_back(key); + } + return res; +} + std::vector generateValidUniqueLedgerEntriesWithTypes( std::unordered_set const& types, size_t n) diff --git a/src/ledger/test/LedgerTestUtils.h b/src/ledger/test/LedgerTestUtils.h index 27277b0ac3..ca85ea1d75 100644 --- a/src/ledger/test/LedgerTestUtils.h +++ b/src/ledger/test/LedgerTestUtils.h @@ -6,6 +6,8 @@ #include "history/HistoryManager.h" #include "overlay/StellarXDR.h" +#include "util/UnorderedSet.h" +#include "util/types.h" namespace 
stellar { @@ -45,6 +47,10 @@ std::vector generateValidUniqueLedgerEntries(size_t n); std::vector generateValidLedgerEntryKeysWithExclusions( std::unordered_set const& excludedTypes, size_t n); +std::vector generateValidUniqueLedgerKeysWithTypes( + std::unordered_set const& types, size_t n, + UnorderedSet& seenKeys); + std::vector generateUniqueValidSorobanLedgerEntryKeys(size_t n); std::vector generateValidUniqueLedgerEntryKeysWithExclusions( diff --git a/src/ledger/test/LedgerTxnTests.cpp b/src/ledger/test/LedgerTxnTests.cpp index ba3d2a698c..11f0a2c9fd 100644 --- a/src/ledger/test/LedgerTxnTests.cpp +++ b/src/ledger/test/LedgerTxnTests.cpp @@ -339,13 +339,18 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") std::bernoulli_distribution shouldCommitDist; auto generateNew = [](AbstractLedgerTxn& ltx, - UnorderedMap& entries) { + UnorderedMap& entries, + bool offerOnly) { size_t const NEW_ENTRIES = 100; UnorderedMap newBatch; while (newBatch.size() < NEW_ENTRIES) { - auto le = LedgerTestUtils::generateValidLedgerEntryWithExclusions( - {CONFIG_SETTING}); + auto le = + offerOnly + ? LedgerTestUtils::generateValidLedgerEntryOfType(OFFER) + : LedgerTestUtils::generateValidLedgerEntryWithExclusions( + {CONFIG_SETTING}); + auto key = LedgerEntryKey(le); if (entries.find(LedgerEntryKey(le)) == entries.end()) { @@ -428,7 +433,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") } }; - auto runTest = [&](AbstractLedgerTxnParent& ltxParent) { + auto runTest = [&](AbstractLedgerTxnParent& ltxParent, bool offerOnly) { UnorderedMap entries; UnorderedSet dead; size_t const NUM_BATCHES = 10; @@ -439,7 +444,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") UnorderedMap updatedEntries = entries; UnorderedSet updatedDead = dead; LedgerTxn ltx1(ltxParent); - generateNew(ltx1, updatedEntries); + generateNew(ltx1, updatedEntries, offerOnly); generateModify(ltx1, updatedEntries); generateErase(ltx1, updatedEntries, updatedDead); @@ -459,7 +464,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") auto app = createTestApplication(clock, getTestConfig(0, mode)); LedgerTxn ltx1(app->getLedgerTxnRoot()); - runTest(ltx1); + runTest(ltx1, false); } SECTION("round trip to LedgerTxnRoot") @@ -468,13 +473,9 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") { VirtualClock clock; // BucketListDB incompatible with direct root commits - auto app = createTestApplication( - clock, - getTestConfig(0, mode == Config::TESTDB_DEFAULT - ? Config::TESTDB_IN_MEMORY_NO_OFFERS - : mode)); + auto app = createTestApplication(clock, getTestConfig(0, mode)); - runTest(app->getLedgerTxnRoot()); + runTest(app->getLedgerTxnRoot(), true); } SECTION("with no cache") @@ -482,31 +483,23 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]") VirtualClock clock; // BucketListDB incompatible with direct root commits - auto cfg = - getTestConfig(0, mode == Config::TESTDB_DEFAULT - ? 
Config::TESTDB_IN_MEMORY_NO_OFFERS - : mode); + auto cfg = getTestConfig(0, mode); cfg.ENTRY_CACHE_SIZE = 0; auto app = createTestApplication(clock, cfg); - runTest(app->getLedgerTxnRoot()); + runTest(app->getLedgerTxnRoot(), true); } } }; - SECTION("default") - { - runTestWithDbMode(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") + SECTION("bucketlist") { - runTestWithDbMode(Config::TESTDB_ON_DISK_SQLITE); + runTestWithDbMode(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTestWithDbMode(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTestWithDbMode(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -713,19 +706,14 @@ TEST_CASE("LedgerTxn createWithoutLoading and updateWithoutLoading", } }; - SECTION("default") + SECTION("bucketlist") { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") - { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -813,19 +801,14 @@ TEST_CASE("LedgerTxn erase", "[ledgertxn]") validate(ltx3, {}); } }; - SECTION("default") + SECTION("bucketlist") { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") - { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -918,19 +901,14 @@ TEST_CASE("LedgerTxn eraseWithoutLoading", "[ledgertxn]") } }; - SECTION("default") - { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") + SECTION("bucketlist") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -1035,7 +1013,7 @@ testInflationWinners( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); testAtRoot(*app); } @@ -1044,7 +1022,7 @@ testInflationWinners( if (updates.size() > 1) { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.ENTRY_CACHE_SIZE = 0; auto app = createTestApplication(clock, cfg); @@ -1055,7 +1033,7 @@ testInflationWinners( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); testInflationWinners(app->getLedgerTxnRoot(), maxWinners, minBalance, expected, updates.cbegin(), updates.cend()); @@ -1384,19 +1362,14 @@ TEST_CASE("LedgerTxn loadHeader", "[ledgertxn]") } }; - SECTION("default") - { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") + SECTION("bucketlist") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -1494,103 +1467,16 @@ TEST_CASE_VERSIONS("LedgerTxn load", "[ledgertxn]") } }); } - - SECTION("load tests for all versions") - { - for_all_versions(*app, [&]() { - SECTION("invalid keys") - { - LedgerTxn ltx1(app->getLedgerTxnRoot()); - - auto acc = txtest::getAccount("acc"); - auto acc2 = txtest::getAccount("acc2"); - - { - auto native = txtest::makeNativeAsset(); - 
UNSCOPED_INFO("native asset on trustline key"); - - // Invariant not supported in BucketListDB and in-memory - // mode - if (mode != Config::TESTDB_DEFAULT && - mode != Config::TESTDB_IN_MEMORY_NO_OFFERS) - { - REQUIRE_THROWS_AS(ltx1.load(trustlineKey( - acc.getPublicKey(), native)), - NonSociRelatedException); - } - } - - { - auto usd = txtest::makeAsset(acc, "usd"); - UNSCOPED_INFO("issuer on trustline key"); - - // Invariant not supported in BucketListDB and in-memory - // mode - if (mode != Config::TESTDB_DEFAULT && - mode != Config::TESTDB_IN_MEMORY_NO_OFFERS) - { - REQUIRE_THROWS_AS(ltx1.load(trustlineKey( - acc.getPublicKey(), usd)), - NonSociRelatedException); - } - } - - { - std::string accountIDStr, issuerStr, assetCodeStr; - auto invalidAssets = testutil::getInvalidAssets(acc); - for (auto const& asset : invalidAssets) - { - auto key = trustlineKey(acc2.getPublicKey(), asset); - - // Invariant not supported in BucketListDB and - // in-memory mode - if (mode != Config::TESTDB_DEFAULT && - mode != Config::TESTDB_IN_MEMORY_NO_OFFERS) - { - REQUIRE_THROWS_AS(ltx1.load(key), - NonSociRelatedException); - } - } - } - - SECTION("load generated keys") - { - for (int i = 0; i < 1000; ++i) - { - LedgerKey lk = autocheck::generator()(5); - - try - { - ltx1.load(lk); - } - catch (NonSociRelatedException&) - { - // this is fine - } - catch (std::exception&) - { - REQUIRE(false); - } - } - } - } - }); - } }; - SECTION("default") + SECTION("bucketlist") { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") - { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -1933,19 +1819,14 @@ TEST_CASE("LedgerTxn loadAllOffers", "[ledgertxn]") } }; - SECTION("default") - { - runTest(Config::TESTDB_DEFAULT); - } - - SECTION("sqlite") + SECTION("bucketlist") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -2334,14 +2215,19 @@ TEST_CASE("LedgerTxn loadBestOffer", "[ledgertxn]") loadAccount(ltx2, account.accountID); } - // Note that we can't prefetch for more than 1000 offers - double expectedPrefetchHitRate = - std::min(numOffers - offerID, - static_cast(getMaxOffersToCross())) / - static_cast(accounts.size()); - REQUIRE(fabs(expectedPrefetchHitRate - - ltx2.getPrefetchHitRate()) < .000001); - REQUIRE(preLoadPrefetchHitRate < ltx2.getPrefetchHitRate()); + // Prefetch doesn't work in in-memory mode, but this is for + // testing only so we only care about accuracy + if (mode != Config::TESTDB_IN_MEMORY) + { + // Note that we can't prefetch for more than 1000 offers + double expectedPrefetchHitRate = + std::min(numOffers - offerID, + static_cast(getMaxOffersToCross())) / + static_cast(accounts.size()); + REQUIRE(fabs(expectedPrefetchHitRate - + ltx2.getPrefetchHitRate()) < .000001); + REQUIRE(preLoadPrefetchHitRate < ltx2.getPrefetchHitRate()); + } }; SECTION("prefetch for all worse remaining offers") @@ -2362,14 +2248,16 @@ TEST_CASE("LedgerTxn loadBestOffer", "[ledgertxn]") } }; - SECTION("default") + SECTION("bucketlist") { - runTest(Config::TESTDB_DEFAULT); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } - SECTION("sqlite") + // This mode is only used in testing, but we should still make sure it works + // for other tests that leverage it + 
SECTION("in-memory") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -2738,7 +2626,7 @@ TEST_CASE("LedgerTxnRoot prefetch classic entries", "[ledgertxn]") e.lastModifiedLedgerSeq = 1; entrySet.emplace(e); } - if (cfg.isUsingBucketListDB()) + if (!cfg.MODE_USES_IN_MEMORY_LEDGER) { std::vector ledgerVect{entrySet.begin(), entrySet.end()}; @@ -2747,9 +2635,8 @@ TEST_CASE("LedgerTxnRoot prefetch classic entries", "[ledgertxn]") .getLastClosedLedgerHeader() .header.ledgerVersion; lh.ledgerSeq = 2; - BucketTestUtils::addBatchAndUpdateSnapshot( - app->getBucketManager().getBucketList(), *app, lh, {}, - ledgerVect, {}); + BucketTestUtils::addLiveBatchAndUpdateSnapshot(*app, lh, {}, + ledgerVect, {}); } ltx.commit(); @@ -2790,14 +2677,9 @@ TEST_CASE("LedgerTxnRoot prefetch classic entries", "[ledgertxn]") } }; - SECTION("default") - { - runTest(getTestConfig()); - } - - SECTION("sqlite") + SECTION("bucketlist") { - runTest(getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE)); + runTest(getTestConfig(Config::TESTDB_BUCKET_DB_PERSISTENT)); } #ifdef USE_POSTGRES @@ -2822,7 +2704,9 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]") { // First add some bulking entries so we're not using a // totally empty database. - entries = LedgerTestUtils::generateValidLedgerEntries(n); + entries = + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {OFFER}, n); LedgerTxn ltx(app->getLedgerTxnRoot()); for (auto e : entries) { @@ -2832,7 +2716,8 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]") } // Then do some precise timed creates. - entries = LedgerTestUtils::generateValidLedgerEntries(n); + entries = LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {OFFER}, n); auto& m = app->getMetrics().NewMeter({"ledger", "create", "commit"}, "entry"); while (!entries.empty()) @@ -2859,8 +2744,8 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]") SECTION("sqlite") { - runTest(Config::TESTDB_ON_DISK_SQLITE, true); - runTest(Config::TESTDB_ON_DISK_SQLITE, false); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, true); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, false); } #ifdef USE_POSTGRES @@ -2886,7 +2771,9 @@ TEST_CASE("Erase performance benchmark", "[!hide][erasebench]") { // First add some bulking entries so we're not using a // totally empty database. - entries = LedgerTestUtils::generateValidLedgerEntries(n); + entries = + LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes( + {OFFER}, n); LedgerTxn ltx(app->getLedgerTxnRoot()); for (auto e : entries) { @@ -2922,8 +2809,8 @@ TEST_CASE("Erase performance benchmark", "[!hide][erasebench]") SECTION("sqlite") { - runTest(Config::TESTDB_ON_DISK_SQLITE, true); - runTest(Config::TESTDB_ON_DISK_SQLITE, false); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, true); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, false); } #ifdef USE_POSTGRES @@ -2942,7 +2829,6 @@ TEST_CASE("LedgerTxnRoot prefetch soroban entries", "[ledgertxn]") // Test setup. 
VirtualClock clock; - cfg.DEPRECATED_SQL_LEDGER_STATE = false; Application::pointer app = createTestApplication(clock, cfg); UnorderedSet keysToPrefetch; auto& root = app->getLedgerTxnRoot(); @@ -2980,9 +2866,8 @@ TEST_CASE("LedgerTxnRoot prefetch soroban entries", "[ledgertxn]") .getLastClosedLedgerHeader() .header.ledgerVersion; lh.ledgerSeq = 2; - BucketTestUtils::addBatchAndUpdateSnapshot( - app->getBucketManager().getBucketList(), *app, lh, {}, ledgerVect, - deadKeyVect); + BucketTestUtils::addLiveBatchAndUpdateSnapshot(*app, lh, {}, ledgerVect, + deadKeyVect); ltx.commit(); auto addTxn = [&](bool enoughQuota, std::vector entries, @@ -3150,219 +3035,6 @@ TEST_CASE("LedgerKeyMeter tests") REQUIRE(lkMeter.canLoad(ttlKey, std::numeric_limits::max())); } -TEST_CASE("Bulk load batch size benchmark", "[!hide][bulkbatchsizebench]") -{ - size_t floor = 1000; - size_t ceiling = 20000; - size_t bestBatchSize = 0; - double bestTime = 0xffffffff; - - auto runTest = [&](Config::TestDbMode mode) { - for (; floor <= ceiling; floor += 1000) - { - UnorderedSet keys; - VirtualClock clock; - Config cfg(getTestConfig(0, mode)); - cfg.PREFETCH_BATCH_SIZE = floor; - - auto app = createTestApplication(clock, cfg); - - auto& root = app->getLedgerTxnRoot(); - - auto entries = LedgerTestUtils::generateValidLedgerEntries(50000); - LedgerTxn ltx(root); - for (auto e : entries) - { - ltx.createWithoutLoading(e); - keys.insert(LedgerEntryKey(e)); - } - ltx.commit(); - - auto& m = app->getMetrics().NewTimer( - {"ledger", "bulk-load", std::to_string(floor) + " batch"}); - LedgerTxn ltx2(root); - { - m.TimeScope(); - root.prefetchClassic(keys); - } - ltx2.commit(); - - auto total = m.sum(); - CLOG_INFO(Ledger, "Bulk Load test batch size: {} took {}", floor, - total); - - if (total < bestTime) - { - bestBatchSize = floor; - bestTime = total; - } - } - CLOG_INFO(Ledger, "Best batch and best time per entry {} : {}", - bestBatchSize, bestTime); - }; - - SECTION("sqlite") - { - runTest(Config::TESTDB_ON_DISK_SQLITE); - } - -#ifdef USE_POSTGRES - SECTION("postgresql") - { - runTest(Config::TESTDB_POSTGRESQL); - } -#endif -} - -TEST_CASE("Signers performance benchmark", "[!hide][signersbench]") -{ - auto getTimeScope = [](Application& app, uint32_t numSigners, - std::string const& phase) { - std::string benchmarkStr = "benchmark-" + std::to_string(numSigners); - return app.getMetrics() - .NewTimer({"signers", benchmarkStr, phase}) - .TimeScope(); - }; - - auto getTimeSpent = [](Application& app, uint32_t numSigners, - std::string const& phase) { - std::string benchmarkStr = "benchmark-" + std::to_string(numSigners); - auto time = - app.getMetrics().NewTimer({"signers", benchmarkStr, phase}).sum(); - return phase + ": " + std::to_string(time) + " ms"; - }; - - auto generateEntries = [](size_t numAccounts, uint32_t numSigners) { - std::vector accounts; - accounts.reserve(numAccounts); - for (size_t i = 0; i < numAccounts; ++i) - { - LedgerEntry le; - le.data.type(ACCOUNT); - le.lastModifiedLedgerSeq = 2; - le.data.account() = LedgerTestUtils::generateValidAccountEntry(); - - auto& signers = le.data.account().signers; - if (signers.size() > numSigners) - { - signers.resize(numSigners); - } - else if (signers.size() < numSigners) - { - signers.reserve(numSigners); - std::generate_n(std::back_inserter(signers), - numSigners - signers.size(), - std::bind(autocheck::generator(), 5)); - std::sort(signers.begin(), signers.end(), - [](Signer const& lhs, Signer const& rhs) { - return lhs.key < rhs.key; - }); - } - - 
accounts.emplace_back(le); - } - return accounts; - }; - - auto generateKeys = [](std::vector const& accounts) { - std::vector keys; - keys.reserve(accounts.size()); - std::transform( - accounts.begin(), accounts.end(), std::back_inserter(keys), - [](LedgerEntry const& le) { return LedgerEntryKey(le); }); - return keys; - }; - - auto writeEntries = - [&getTimeScope](Application& app, uint32_t numSigners, - std::vector const& accounts) { - CLOG_WARNING(Ledger, "Creating accounts"); - LedgerTxn ltx(app.getLedgerTxnRoot()); - { - auto timer = getTimeScope(app, numSigners, "create"); - for (auto const& le : accounts) - { - ltx.create(le); - } - } - - CLOG_WARNING(Ledger, "Writing accounts"); - { - auto timer = getTimeScope(app, numSigners, "write"); - ltx.commit(); - } - }; - - auto readEntriesAndUpdateLastModified = - [&getTimeScope](Application& app, uint32_t numSigners, - std::vector const& accounts) { - CLOG_WARNING(Ledger, "Reading accounts"); - LedgerTxn ltx(app.getLedgerTxnRoot()); - { - auto timer = getTimeScope(app, numSigners, "read"); - for (auto const& key : accounts) - { - ++ltx.load(key).current().lastModifiedLedgerSeq; - } - } - - CLOG_WARNING(Ledger, "Writing accounts with unchanged signers"); - { - auto timer = getTimeScope(app, numSigners, "rewrite"); - ltx.commit(); - } - }; - - auto runTest = [&](Config::TestDbMode mode, size_t numAccounts, - uint32_t numSigners) { - VirtualClock clock; - Config cfg(getTestConfig(0, mode)); - cfg.ENTRY_CACHE_SIZE = 0; - Application::pointer app = createTestApplication(clock, cfg); - - CLOG_WARNING(Ledger, "Generating {} accounts with {} signers each", - numAccounts, numSigners); - auto accounts = generateEntries(numAccounts, numSigners); - auto keys = generateKeys(accounts); - - writeEntries(*app, numSigners, accounts); - readEntriesAndUpdateLastModified(*app, numSigners, keys); - - CLOG_WARNING(Ledger, "Done ({}, {}, {}, {})", - getTimeSpent(*app, numSigners, "create"), - getTimeSpent(*app, numSigners, "write"), - getTimeSpent(*app, numSigners, "read"), - getTimeSpent(*app, numSigners, "rewrite")); - }; - - auto runTests = [&](Config::TestDbMode mode) { - SECTION("0 signers") - { - runTest(mode, 100000, 0); - } - SECTION("10 signers") - { - runTest(mode, 100000, 10); - } - SECTION("20 signers") - { - runTest(mode, 100000, 20); - } - }; - - SECTION("sqlite") - { - runTests(Config::TESTDB_ON_DISK_SQLITE); - } - -#ifdef USE_POSTGRES - SECTION("postgresql") - { - runTests(Config::TESTDB_POSTGRESQL); - } -#endif -} - TEST_CASE("Load best offers benchmark", "[!hide][bestoffersbench]") { auto getTimeScope = [](Application& app, std::string const& phase) { @@ -3532,7 +3204,7 @@ TEST_CASE("Load best offers benchmark", "[!hide][bestoffersbench]") SECTION("sqlite") { - runTest(Config::TESTDB_ON_DISK_SQLITE, 10, 5, 25000); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, 10, 5, 25000); } } @@ -3938,14 +3610,16 @@ TEST_CASE("LedgerTxn in memory order book", "[ledgertxn]") } }; - SECTION("default") + SECTION("bucketlist") { - runTest(Config::TESTDB_DEFAULT); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } - SECTION("sqlite") + // This mode is just used for testing, but we should still make sure it + // works + SECTION("in-memory") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -3956,7 +3630,7 @@ TEST_CASE("LedgerTxn in memory order book", "[ledgertxn]") #endif } -TEST_CASE_VERSIONS("LedgerTxn bulk-load offers", "[ledgertxn]") +TEST_CASE("Access deactivated entry", "[ledgertxn]") { auto runTest = 
[&](Config::TestDbMode mode) { VirtualClock clock; @@ -3966,47 +3640,6 @@ TEST_CASE_VERSIONS("LedgerTxn bulk-load offers", "[ledgertxn]") le1.data.type(OFFER); le1.data.offer() = LedgerTestUtils::generateValidOfferEntry(); - LedgerKey lk1 = LedgerEntryKey(le1); - auto lk2 = lk1; - lk2.offer().sellerID = - LedgerTestUtils::generateValidOfferEntry().sellerID; - - { - LedgerTxn ltx(app->getLedgerTxnRoot()); - ltx.create(le1); - ltx.commit(); - } - - for_all_versions(*app, [&]() { - app->getLedgerTxnRoot().prefetchClassic({lk1, lk2}); - LedgerTxn ltx(app->getLedgerTxnRoot()); - REQUIRE(ltx.load(lk1)); - }); - }; - - SECTION("sqlite") - { - runTest(Config::TESTDB_ON_DISK_SQLITE); - } - -#ifdef USE_POSTGRES - SECTION("postgresql") - { - runTest(Config::TESTDB_POSTGRESQL); - } -#endif -} - -TEST_CASE("Access deactivated entry", "[ledgertxn]") -{ - auto runTest = [&](Config::TestDbMode mode) { - VirtualClock clock; - auto app = createTestApplication(clock, getTestConfig(0, mode)); - - LedgerEntry le1; - le1.data.type(DATA); - le1.data.data() = LedgerTestUtils::generateValidDataEntry(); - LedgerKey lk1 = LedgerEntryKey(le1); { @@ -4122,14 +3755,14 @@ TEST_CASE("Access deactivated entry", "[ledgertxn]") } }; - SECTION("sqlite") + SECTION("bucketlist") { - runTest(Config::TESTDB_ON_DISK_SQLITE); + runTest(Config::TESTDB_BUCKET_DB_PERSISTENT); } SECTION("in-memory") { - runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS); + runTest(Config::TESTDB_IN_MEMORY); } #ifdef USE_POSTGRES @@ -4185,7 +3818,7 @@ TEST_CASE("LedgerTxn generalized ledger entries", "[ledgertxn]") TEST_CASE("LedgerTxn best offers cache eviction", "[ledgertxn]") { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); auto buying = autocheck::generator()(UINT32_MAX); @@ -4402,7 +4035,7 @@ testPoolShareTrustLinesByAccountAndAsset( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); for_versions_from(18, *app, [&] { testAtRoot(*app); }); } @@ -4411,7 +4044,7 @@ testPoolShareTrustLinesByAccountAndAsset( if (updates.size() > 1) { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.ENTRY_CACHE_SIZE = 0; auto app = createTestApplication(clock, cfg); @@ -4422,7 +4055,7 @@ testPoolShareTrustLinesByAccountAndAsset( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); for_versions_from(18, *app, [&] { testPoolShareTrustLinesByAccountAndAsset( @@ -4450,7 +4083,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset", { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); LedgerTxn ltx1(app->getLedgerTxnRoot()); LedgerTxn ltx2(ltx1); @@ -4463,7 +4096,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset", { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); LedgerTxn ltx1(app->getLedgerTxnRoot()); ltx1.getDelta(); @@ -4534,7 +4167,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset", 
TEST_CASE("InMemoryLedgerTxn simulate buckets", "[ledgertxn]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); @@ -4576,7 +4209,7 @@ TEST_CASE("InMemoryLedgerTxn simulate buckets", "[ledgertxn]") TEST_CASE("InMemoryLedgerTxn getOffersByAccountAndAsset", "[ledgertxn]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); @@ -4620,7 +4253,7 @@ TEST_CASE("InMemoryLedgerTxn getPoolShareTrustLinesByAccountAndAsset", "[ledgertxn]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); @@ -4669,7 +4302,7 @@ TEST_CASE_VERSIONS("InMemoryLedgerTxn close multiple ledgers with merges", "[ledgertxn]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); @@ -4693,7 +4326,7 @@ TEST_CASE_VERSIONS("InMemoryLedgerTxn close multiple ledgers with merges", TEST_CASE("InMemoryLedgerTxn filtering", "[ledgertxn]") { VirtualClock clock; - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); auto app = createTestApplication(clock, cfg); auto root = TestAccount::createRoot(*app); diff --git a/src/main/ApplicationImpl.cpp b/src/main/ApplicationImpl.cpp index ef2ced2f04..dc12cd4833 100644 --- a/src/main/ApplicationImpl.cpp +++ b/src/main/ApplicationImpl.cpp @@ -84,12 +84,8 @@ ApplicationImpl::ApplicationImpl(VirtualClock& clock, Config const& cfg) : mVirtualClock(clock) , mConfig(cfg) // Allocate one worker to eviction when background eviction enabled - , mWorkerIOContext(mConfig.isUsingBackgroundEviction() - ? mConfig.WORKER_THREADS - 1 - : mConfig.WORKER_THREADS) - , mEvictionIOContext(mConfig.isUsingBackgroundEviction() - ? 
std::make_unique(1) - : nullptr) + , mWorkerIOContext(mConfig.WORKER_THREADS - 1) + , mEvictionIOContext(std::make_unique(1)) , mWork(std::make_unique(mWorkerIOContext)) , mEvictionWork( mEvictionIOContext @@ -157,19 +153,16 @@ ApplicationImpl::ApplicationImpl(VirtualClock& clock, Config const& cfg) auto t = mConfig.WORKER_THREADS; LOG_DEBUG(DEFAULT_LOG, "Application constructing (worker threads: {})", t); - if (mConfig.isUsingBackgroundEviction()) - { - releaseAssert(mConfig.WORKER_THREADS > 0); - releaseAssert(mEvictionIOContext); + releaseAssert(mConfig.WORKER_THREADS > 0); + releaseAssert(mEvictionIOContext); - // Allocate one thread for Eviction scan - mEvictionThread = std::thread{[this]() { - runCurrentThreadWithMediumPriority(); - mEvictionIOContext->run(); - }}; + // Allocate one thread for Eviction scan + mEvictionThread = std::thread{[this]() { + runCurrentThreadWithMediumPriority(); + mEvictionIOContext->run(); + }}; - --t; - } + --t; while (t--) { @@ -190,92 +183,17 @@ ApplicationImpl::ApplicationImpl(VirtualClock& clock, Config const& cfg) static void maybeRebuildLedger(Application& app, bool applyBuckets) { - std::set toDrop; - std::set toRebuild; auto& ps = app.getPersistentState(); - auto bucketListDBEnabled = app.getConfig().isUsingBucketListDB(); - for (auto let : xdr::xdr_traits::enum_values()) - { - // If BucketListDB is enabled, drop all tables except for offers - LedgerEntryType t = static_cast(let); - if (let != OFFER && bucketListDBEnabled) - { - toDrop.emplace(t); - continue; - } - if (ps.shouldRebuildForType(t)) - { - toRebuild.emplace(t); - } - } - - if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER) + if (ps.shouldRebuildForOfferTable()) { app.getDatabase().clearPreparedStatementCache(); soci::transaction tx(app.getDatabase().getSession()); + LOG_INFO(DEFAULT_LOG, "Dropping offers"); + app.getLedgerTxnRoot().dropOffers(/*rebuild=*/true); - auto loopEntries = [&](auto const& entryTypeSet, bool shouldRebuild) { - for (auto let : entryTypeSet) - { - switch (let) - { - case ACCOUNT: - LOG_INFO(DEFAULT_LOG, "Dropping accounts"); - app.getLedgerTxnRoot().dropAccounts(shouldRebuild); - break; - case TRUSTLINE: - LOG_INFO(DEFAULT_LOG, "Dropping trustlines"); - app.getLedgerTxnRoot().dropTrustLines(shouldRebuild); - break; - case OFFER: - LOG_INFO(DEFAULT_LOG, "Dropping offers"); - app.getLedgerTxnRoot().dropOffers(shouldRebuild); - break; - case DATA: - LOG_INFO(DEFAULT_LOG, "Dropping accountdata"); - app.getLedgerTxnRoot().dropData(shouldRebuild); - break; - case CLAIMABLE_BALANCE: - LOG_INFO(DEFAULT_LOG, "Dropping claimablebalances"); - app.getLedgerTxnRoot().dropClaimableBalances(shouldRebuild); - break; - case LIQUIDITY_POOL: - LOG_INFO(DEFAULT_LOG, "Dropping liquiditypools"); - app.getLedgerTxnRoot().dropLiquidityPools(shouldRebuild); - break; - case CONTRACT_DATA: - LOG_INFO(DEFAULT_LOG, "Dropping contractdata"); - app.getLedgerTxnRoot().dropContractData(shouldRebuild); - break; - case CONTRACT_CODE: - LOG_INFO(DEFAULT_LOG, "Dropping contractcode"); - app.getLedgerTxnRoot().dropContractCode(shouldRebuild); - break; - case CONFIG_SETTING: - LOG_INFO(DEFAULT_LOG, "Dropping configsettings"); - app.getLedgerTxnRoot().dropConfigSettings(shouldRebuild); - break; - case TTL: - LOG_INFO(DEFAULT_LOG, "Dropping ttl"); - app.getLedgerTxnRoot().dropTTL(shouldRebuild); - break; - default: - abort(); - } - } - }; - - loopEntries(toRebuild, true); - loopEntries(toDrop, false); tx.commit(); - // Nothing to apply, exit early - if (toRebuild.empty()) - { - return; - } - // No 
transaction is needed. ApplyBucketsWork breaks the apply into many // small chunks, each of which has its own transaction. If it fails at // some point in the middle, then rebuildledger will not be cleared so @@ -284,10 +202,7 @@ maybeRebuildLedger(Application& app, bool applyBuckets) { LOG_INFO(DEFAULT_LOG, "Rebuilding ledger tables by applying buckets"); - auto filter = [&toRebuild](LedgerEntryType t) { - return toRebuild.find(t) != toRebuild.end(); - }; - if (!applyBucketsForLCL(app, filter)) + if (!applyBucketsForLCL(app)) { throw std::runtime_error("Could not rebuild ledger tables"); } @@ -295,10 +210,7 @@ maybeRebuildLedger(Application& app, bool applyBuckets) } } - for (auto let : toRebuild) - { - ps.clearRebuildForType(let); - } + ps.clearRebuildForOfferTable(); } void @@ -331,29 +243,29 @@ ApplicationImpl::initialize(bool createNewDB, bool forceRebuild) mBanManager = BanManager::create(*this); mStatusManager = std::make_unique(); - if (getConfig().MODE_USES_IN_MEMORY_LEDGER) + if (mConfig.ENTRY_CACHE_SIZE < 20000) { - resetLedgerState(); + LOG_WARNING(DEFAULT_LOG, + "ENTRY_CACHE_SIZE({}) is below the recommended minimum " + "of 20000", + mConfig.ENTRY_CACHE_SIZE); } - else - { - if (mConfig.ENTRY_CACHE_SIZE < 20000) - { - LOG_WARNING(DEFAULT_LOG, - "ENTRY_CACHE_SIZE({}) is below the recommended minimum " - "of 20000", - mConfig.ENTRY_CACHE_SIZE); - } - mLedgerTxnRoot = std::make_unique( - *this, mConfig.ENTRY_CACHE_SIZE, mConfig.PREFETCH_BATCH_SIZE + mLedgerTxnRoot = std::make_unique( + *this, mConfig.ENTRY_CACHE_SIZE, mConfig.PREFETCH_BATCH_SIZE #ifdef BEST_OFFER_DEBUGGING - , - mConfig.BEST_OFFER_DEBUGGING_ENABLED + , + mConfig.BEST_OFFER_DEBUGGING_ENABLED #endif - ); + ); - BucketListIsConsistentWithDatabase::registerInvariant(*this); +#ifdef BUILD_TESTS + if (getConfig().MODE_USES_IN_MEMORY_LEDGER) + { + resetLedgerState(); } +#endif + + BucketListIsConsistentWithDatabase::registerInvariant(*this); AccountSubEntriesCountIsValid::registerInvariant(*this); ConservationOfLumens::registerInvariant(*this); @@ -386,6 +298,7 @@ ApplicationImpl::initialize(bool createNewDB, bool forceRebuild) void ApplicationImpl::resetLedgerState() { +#ifdef BUILD_TESTS if (getConfig().MODE_USES_IN_MEMORY_LEDGER) { mNeverCommittingLedgerTxn.reset(); @@ -395,12 +308,13 @@ ApplicationImpl::resetLedgerState() #endif ); mNeverCommittingLedgerTxn = std::make_unique( - *mInMemoryLedgerTxnRoot, getDatabase()); + *mInMemoryLedgerTxnRoot, getDatabase(), mLedgerTxnRoot.get()); } else +#endif { auto& lsRoot = getLedgerTxnRoot(); - lsRoot.deleteObjectsModifiedOnOrAfterLedger(0); + lsRoot.deleteOffersModifiedOnOrAfterLedger(0); } } @@ -419,10 +333,7 @@ ApplicationImpl::upgradeToCurrentSchemaAndMaybeRebuildLedger(bool applyBuckets, if (forceRebuild) { auto& ps = getPersistentState(); - for (auto let : xdr::xdr_traits::enum_values()) - { - ps.setRebuildForType(static_cast(let)); - } + ps.setRebuildForOfferTable(); } mDatabase->upgradeToCurrentSchema(); @@ -740,26 +651,13 @@ ApplicationImpl::validateAndLogConfig() "RUN_STANDALONE is not set"); } - // EXPERIMENTAL_PRECAUTION_DELAY_META is only meaningful when there's a - // METADATA_OUTPUT_STREAM. We only allow EXPERIMENTAL_PRECAUTION_DELAY_META - // on a captive core, without a persistent database; old-style ingestion - // which reads from the core database could do the delaying itself. 
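The hunk below swaps that hard error for a log line: a config that sets `EXPERIMENTAL_PRECAUTION_DELAY_META` without a `METADATA_OUTPUT_STREAM` is now merely ignored rather than rejected. Reduced to its essentials (names simplified, not the real `Config` type), the new rule looks like this:

```cpp
// Sketch of the relaxed validation: warn instead of throwing when the
// delay flag is set but no meta stream is configured, since delaying meta
// that is never emitted is harmless.
#include <iostream>
#include <string>

struct MetaConfig
{
    std::string metadataOutputStream; // "" means no stream configured
    bool delayMeta = false;           // EXPERIMENTAL_PRECAUTION_DELAY_META
};

void
validateMetaConfig(MetaConfig const& cfg)
{
    if (cfg.metadataOutputStream.empty() && cfg.delayMeta)
    {
        std::cerr << "EXPERIMENTAL_PRECAUTION_DELAY_META is ignored "
                     "because METADATA_OUTPUT_STREAM is not set\n";
    }
}
```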
- if (mConfig.METADATA_OUTPUT_STREAM != "" && - mConfig.EXPERIMENTAL_PRECAUTION_DELAY_META && !mConfig.isInMemoryMode()) + if (mConfig.METADATA_OUTPUT_STREAM == "" && + mConfig.EXPERIMENTAL_PRECAUTION_DELAY_META) { - throw std::invalid_argument( - "Using a METADATA_OUTPUT_STREAM with " - "EXPERIMENTAL_PRECAUTION_DELAY_META set to true " - "requires --in-memory"); + CLOG_WARNING(Tx, "EXPERIMENTAL_PRECAUTION_DELAY_META is ignored " + "because METADATA_OUTPUT_STREAM is not set"); } - if (mConfig.isInMemoryMode()) - { - CLOG_WARNING( - Bucket, - "in-memory mode is enabled. This feature is deprecated! Node " - "may see performance degredation and lose sync with the network."); - } if (!mDatabase->isSqlite()) { CLOG_WARNING(Database, @@ -768,86 +666,35 @@ ApplicationImpl::validateAndLogConfig() "release. Please use sqlite3 for non-ledger state data."); } - if (mConfig.DEPRECATED_SQL_LEDGER_STATE) + auto pageSizeExp = mConfig.BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT; + if (pageSizeExp != 0) { - if (mPersistentState->getState(PersistentState::kDBBackend) == - BucketIndex::DB_BACKEND_STATE) + // If the page size is less than 256 bytes, it is essentially + // indexing individual keys, so page size should be set to 0 + // instead. + if (pageSizeExp < 8) { throw std::invalid_argument( - "To downgrade to DEPRECATED_SQL_LEDGER_STATE, run " - "stellar-core new-db."); - } - - CLOG_WARNING( - Bucket, - "SQL for ledger state is enabled. This feature is deprecated! Node " - "may see performance degredation and lose sync with the network."); - } - else - { - if (mConfig.isUsingBucketListDB()) - { - mPersistentState->setState(PersistentState::kDBBackend, - BucketIndex::DB_BACKEND_STATE); - auto pageSizeExp = mConfig.BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT; - if (pageSizeExp != 0) - { - // If the page size is less than 256 bytes, it is essentially - // indexing individual keys, so page size should be set to 0 - // instead. - if (pageSizeExp < 8) - { - throw std::invalid_argument( - "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT " - "must be at least 8 or set to 0 for individual entry " - "indexing"); - } - - // Check if pageSize will cause overflow - if (pageSizeExp > 31) - { - throw std::invalid_argument( - "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT " - "must be less than 32"); - } - } - - CLOG_INFO(Bucket, - "BucketListDB enabled: pageSizeExponent: {} indexCutOff: " - "{}MB, persist indexes: {}", - pageSizeExp, mConfig.BUCKETLIST_DB_INDEX_CUTOFF, - mConfig.isPersistingBucketListDBIndexes()); + "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT " + "must be at least 8 or set to 0 for individual entry " + "indexing"); } - else - { - CLOG_WARNING( - Bucket, - "DEPRECATED_SQL_LEDGER_STATE set to false but " - "deprecated SQL ledger state is active. To disable deprecated " - "SQL ledger state, " - "MODE_ENABLES_BUCKETLIST must be set and --in-memory flag " - "must not be used."); - } - } - if (mConfig.BACKGROUND_EVICTION_SCAN) - { - if (!mConfig.isUsingBucketListDB()) + // Check if pageSize will cause overflow + if (pageSizeExp > 31) { throw std::invalid_argument( - "BACKGROUND_EVICTION_SCAN set to true but " - "DEPRECATED_SQL_LEDGER_STATE is set to true. 
" - "DEPRECATED_SQL_LEDGER_STATE must be set to false to enable " - "background eviction."); - } - - if (mConfig.WORKER_THREADS < 2) - { - throw std::invalid_argument("BACKGROUND_EVICTION_SCAN requires " - "WORKER_THREADS > 1"); + "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT " + "must be less than 32"); } } + CLOG_INFO(Bucket, + "BucketListDB enabled: pageSizeExponent: {} indexCutOff: " + "{}MB, persist indexes: {}", + pageSizeExp, mConfig.BUCKETLIST_DB_INDEX_CUTOFF, + mConfig.BUCKETLIST_DB_PERSIST_INDEX); + if (mConfig.HTTP_QUERY_PORT != 0) { if (isNetworkedValidator) @@ -863,13 +710,6 @@ ApplicationImpl::validateAndLogConfig() "HTTP_QUERY_PORT must be different from HTTP_PORT"); } - if (!mConfig.isUsingBucketListDB()) - { - throw std::invalid_argument( - "HTTP_QUERY_PORT requires DEPRECATED_SQL_LEDGER_STATE to be " - "false"); - } - if (mConfig.QUERY_THREAD_POOL_SIZE == 0) { throw std::invalid_argument( @@ -877,13 +717,6 @@ ApplicationImpl::validateAndLogConfig() } } - if (isNetworkedValidator && mConfig.isInMemoryMode()) - { - throw std::invalid_argument( - "In-memory mode is set, NODE_IS_VALIDATOR is set, " - "and RUN_STANDALONE is not set"); - } - if (getHistoryArchiveManager().hasAnyWritableHistoryArchive()) { if (!mConfig.modeStoresAllHistory()) @@ -1632,7 +1465,14 @@ AbstractLedgerTxnParent& ApplicationImpl::getLedgerTxnRoot() { releaseAssert(threadIsMain()); - return mConfig.MODE_USES_IN_MEMORY_LEDGER ? *mNeverCommittingLedgerTxn - : *mLedgerTxnRoot; + +#ifdef BUILD_TESTS + if (mConfig.MODE_USES_IN_MEMORY_LEDGER) + { + return *mNeverCommittingLedgerTxn; + } +#endif + + return *mLedgerTxnRoot; } } diff --git a/src/main/ApplicationImpl.h b/src/main/ApplicationImpl.h index d195e04774..4dcc71266c 100644 --- a/src/main/ApplicationImpl.h +++ b/src/main/ApplicationImpl.h @@ -189,8 +189,10 @@ class ApplicationImpl : public Application // is held in the never-committing LedgerTxn in its entirety -- so if it // ever grows beyond RAM-size you need to use a mode with some sort of // database on secondary storage. +#ifdef BUILD_TESTS std::unique_ptr mInMemoryLedgerTxnRoot; std::unique_ptr mNeverCommittingLedgerTxn; +#endif std::unique_ptr mCommandHandler; diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp index 5f33fce2a5..c89559e56a 100644 --- a/src/main/ApplicationUtils.cpp +++ b/src/main/ApplicationUtils.cpp @@ -118,79 +118,9 @@ minimalDbPath(Config const& cfg) return dpath; } -void -setupMinimalDBForInMemoryMode(Config const& cfg, uint32_t startAtLedger) -{ - releaseAssertOrThrow(cfg.isInMemoryMode()); - - VirtualClock clock; - Application::pointer app; - - // Look for an existing minimal database, and see if it's possible to - // restore ledger state from buckets. If it is not possible, reset the - // existing database back to genesis. If the minimal database does not - // exist, create a new one. 
- bool found = false; - - auto cfgToCheckDB = cfg; - cfgToCheckDB.METADATA_OUTPUT_STREAM = ""; - - if (std::filesystem::exists(minimalDbPath(cfg))) - { - app = Application::create(clock, cfgToCheckDB, /* newDB */ false); - found = true; - } - else - { - LOG_INFO(DEFAULT_LOG, "Minimal database not found, creating one..."); - app = Application::create(clock, cfgToCheckDB, /* newDB */ true); - } - - // Rebuild the state from scratch if: - // - --start-at-ledger was not provided - // - target catchup ledger is before LCL - // - target catchup ledger is too far ahead of LCL - // In all other cases, attempt restoring the ledger states via - // local bucket application - if (found) - { - LOG_INFO(DEFAULT_LOG, "Found the existing minimal database"); - - // DB state might be set to 0 if core previously exited while rebuilding - // state. In this case, we want to rebuild the DB from scratch - bool rebuildDB = - app->getLedgerManager().getLastClosedLedgerHAS().currentLedger < - LedgerManager::GENESIS_LEDGER_SEQ; - - if (!rebuildDB) - { - // Ledger state is not yet ready during this setup step - app->getLedgerManager().loadLastKnownLedger( - /* restoreBucketlist */ false, /* isLedgerStateReady */ false); - auto lcl = app->getLedgerManager().getLastClosedLedgerNum(); - LOG_INFO(DEFAULT_LOG, "Current in-memory state, got LCL: {}", lcl); - rebuildDB = - !canRebuildInMemoryLedgerFromBuckets(startAtLedger, lcl); - } - - if (rebuildDB) - { - LOG_INFO(DEFAULT_LOG, "Cannot restore the in-memory state, " - "rebuilding the state from scratch"); - app->resetDBForInMemoryMode(); - } - } -} - Application::pointer -setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger, - std::string const& startAtHash) +setupApp(Config& cfg, VirtualClock& clock) { - if (cfg.isInMemoryMode()) - { - setupMinimalDBForInMemoryMode(cfg, startAtLedger); - } - LOG_INFO(DEFAULT_LOG, "Starting stellar-core {}", STELLAR_CORE_VERSION); Application::pointer app; app = Application::create(clock, cfg, false); @@ -202,10 +132,10 @@ setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger, // With in-memory mode, ledger state is not yet ready during this setup step app->getLedgerManager().loadLastKnownLedger( /* restoreBucketlist */ false, - /* isLedgerStateReady */ !cfg.isInMemoryMode()); + /* isLedgerStateReady */ !cfg.MODE_USES_IN_MEMORY_LEDGER); auto lcl = app->getLedgerManager().getLastClosedLedgerHeader(); - if (cfg.isInMemoryMode() && + if (cfg.MODE_USES_IN_MEMORY_LEDGER && lcl.header.ledgerSeq == LedgerManager::GENESIS_LEDGER_SEQ) { // If ledger is genesis, rebuild genesis state from buckets @@ -215,67 +145,6 @@ setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger, } } - bool doCatchupForInMemoryMode = - cfg.isInMemoryMode() && startAtLedger != 0 && !startAtHash.empty(); - if (doCatchupForInMemoryMode) - { - // At this point, setupApp has either confirmed that we can rebuild from - // the existing buckets, or reset the DB to genesis - if (lcl.header.ledgerSeq != LedgerManager::GENESIS_LEDGER_SEQ) - { - auto lclHashStr = binToHex(lcl.hash); - if (lcl.header.ledgerSeq == startAtLedger && - lclHashStr != startAtHash) - { - LOG_ERROR(DEFAULT_LOG, - "Provided hash {} does not agree with stored hash {}", - startAtHash, lclHashStr); - return nullptr; - } - - auto has = app->getLedgerManager().getLastClosedLedgerHAS(); - - // Collect bucket references to pass to catchup _before_ starting - // the app, which may trigger garbage collection - std::set> retained; - for (auto const& b : has.allBuckets()) - { - auto 
bPtr = - app->getBucketManager().getBucketByHash(hexToBin256(b)); - releaseAssert(bPtr); - retained.insert(bPtr); - } - - // Start the app with LCL set to 0 - app->getLedgerManager().setupInMemoryStateRebuild(); - app->start(); - - // Set Herder to track the actual LCL - app->getHerder().setTrackingSCPState(lcl.header.ledgerSeq, - lcl.header.scpValue, true); - - // Schedule the catchup work that will rebuild state - auto cc = CatchupConfiguration(has, lcl); - app->getLedgerManager().startCatchup(cc, /* archive */ nullptr, - retained); - } - else - { - LedgerNumHashPair pair; - pair.first = startAtLedger; - pair.second = std::optional(hexToBin256(startAtHash)); - auto mode = CatchupConfiguration::Mode::OFFLINE_BASIC; - Json::Value catchupInfo; - int res = - catchup(app, CatchupConfiguration{pair, 0, mode}, catchupInfo, - /* archive */ nullptr); - if (res != 0) - { - return nullptr; - } - } - } - return app; } @@ -314,8 +183,7 @@ runApp(Application::pointer app) } bool -applyBucketsForLCL(Application& app, - std::function onlyApply) +applyBucketsForLCL(Application& app) { auto has = app.getLedgerManager().getLastClosedLedgerHAS(); auto lclHash = @@ -329,9 +197,9 @@ applyBucketsForLCL(Application& app, maxProtocolVersion = currentLedger->ledgerVersion; } - std::map> buckets; + std::map> buckets; auto work = app.getWorkScheduler().scheduleWork( - buckets, has, maxProtocolVersion, onlyApply); + buckets, has, maxProtocolVersion); while (app.getClock().crank(true) && !work->isDone()) ; @@ -339,12 +207,6 @@ applyBucketsForLCL(Application& app, return work->getState() == BasicWork::State::WORK_SUCCESS; } -bool -applyBucketsForLCL(Application& app) -{ - return applyBucketsForLCL(app, [](LedgerEntryType) { return true; }); -} - void httpCommand(std::string const& command, unsigned short port) { @@ -574,11 +436,11 @@ struct StateArchivalMetric static void processArchivalMetrics( - std::shared_ptr const b, + std::shared_ptr const b, UnorderedMap& ledgerEntries, UnorderedMap>& ttls) { - for (BucketInputIterator in(b); in; ++in) + for (LiveBucketInputIterator in(b); in; ++in) { auto const& be = *in; bool isDead = be.type() == DEADENTRY; @@ -647,7 +509,7 @@ dumpStateArchivalStatistics(Config cfg) HistoryArchiveState has = lm.getLastClosedLedgerHAS(); std::vector hashes; - for (uint32_t i = 0; i < BucketList::kNumLevels; ++i) + for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i) { HistoryStateBucket const& hsb = has.currentBuckets.at(i); hashes.emplace_back(hexToBin256(hsb.curr)); @@ -665,7 +527,7 @@ dumpStateArchivalStatistics(Config cfg) { continue; } - auto b = bm.getBucketByHash(hash); + auto b = bm.getLiveBucketByHash(hash); if (!b) { throw std::runtime_error(std::string("missing bucket: ") + @@ -720,7 +582,7 @@ dumpStateArchivalStatistics(Config cfg) } } - CLOG_INFO(Bucket, "BucketList total bytes: {}", blSize); + CLOG_INFO(Bucket, "Live BucketList total bytes: {}", blSize); CLOG_INFO(Bucket, "Live Temporary Entries: Newest bytes {} ({}%), Outdated bytes " "{} ({}%)", @@ -929,7 +791,7 @@ loadXdr(Config cfg, std::string const& bucketFile) Application::pointer app = Application::create(clock, cfg, false); uint256 zero; - Bucket bucket(bucketFile, zero, nullptr); + LiveBucket bucket(bucketFile, zero, nullptr); bucket.apply(*app); } diff --git a/src/main/ApplicationUtils.h b/src/main/ApplicationUtils.h index 30d2cb0fed..ac0848bdb6 100644 --- a/src/main/ApplicationUtils.h +++ b/src/main/ApplicationUtils.h @@ -15,9 +15,7 @@ namespace stellar class CatchupConfiguration; // Create application and 
validate its configuration -Application::pointer setupApp(Config& cfg, VirtualClock& clock, - uint32_t startAtLedger, - std::string const& startAtHash); +Application::pointer setupApp(Config& cfg, VirtualClock& clock); int runApp(Application::pointer app); void setForceSCPFlag(); void initializeDatabase(Config cfg); @@ -57,8 +55,6 @@ int catchup(Application::pointer app, CatchupConfiguration cc, // Reduild ledger state based on the buckets. Ensure ledger state is properly // reset before calling this function. bool applyBucketsForLCL(Application& app); -bool applyBucketsForLCL(Application& app, - std::function onlyApply); int publish(Application::pointer app); std::string minimalDBForInMemoryMode(Config const& cfg); bool canRebuildInMemoryLedgerFromBuckets(uint32_t startAtLedger, uint32_t lcl); diff --git a/src/main/CommandHandler.cpp b/src/main/CommandHandler.cpp index fd8d9e3034..92e9cf5691 100644 --- a/src/main/CommandHandler.cpp +++ b/src/main/CommandHandler.cpp @@ -82,13 +82,17 @@ CommandHandler::CommandHandler(Application& app) : mApp(app) app.getClock().getIOContext(), ipStr, mApp.getConfig().HTTP_PORT, httpMaxClient); - if (mApp.getConfig().HTTP_QUERY_PORT && - mApp.getConfig().isUsingBucketListDB()) + if (mApp.getConfig().HTTP_QUERY_PORT) { mQueryServer = std::make_unique( ipStr, mApp.getConfig().HTTP_QUERY_PORT, httpMaxClient, mApp.getConfig().QUERY_THREAD_POOL_SIZE, - mApp.getBucketManager().getBucketSnapshotManager()); + mApp.getBucketManager().getBucketSnapshotManager() +#ifdef BUILD_TESTS + , + mApp.getConfig() +#endif + ); } } else diff --git a/src/main/CommandLine.cpp b/src/main/CommandLine.cpp index c5aada5c6b..842de40f2e 100644 --- a/src/main/CommandLine.cpp +++ b/src/main/CommandLine.cpp @@ -334,54 +334,6 @@ maybeSetMetadataOutputStream(Config& cfg, std::string const& stream) } } -void -maybeEnableInMemoryMode(Config& config, bool inMemory, uint32_t startAtLedger, - std::string const& startAtHash, bool persistMinimalData) -{ - // First, ensure user parameters are valid - if (!inMemory) - { - if (startAtLedger != 0) - { - throw std::runtime_error("--start-at-ledger requires --in-memory"); - } - if (!startAtHash.empty()) - { - throw std::runtime_error("--start-at-hash requires --in-memory"); - } - return; - } - if (startAtLedger != 0 && startAtHash.empty()) - { - throw std::runtime_error("--start-at-ledger requires --start-at-hash"); - } - else if (startAtLedger == 0 && !startAtHash.empty()) - { - throw std::runtime_error("--start-at-hash requires --start-at-ledger"); - } - - // Adjust configs for live in-memory-replay mode - config.setInMemoryMode(); - - if (startAtLedger != 0 && !startAtHash.empty()) - { - config.MODE_AUTO_STARTS_OVERLAY = false; - } - - // Set database to a small sqlite database used to store minimal data needed - // to restore the ledger state - if (persistMinimalData) - { - config.DATABASE = SecretValue{minimalDBForInMemoryMode(config)}; - config.MODE_STORES_HISTORY_LEDGERHEADERS = true; - // Since this mode stores historical data (needed to restore - // ledger state in certain scenarios), set maintenance to run - // aggressively so that we only store a few ledgers worth of data - config.AUTOMATIC_MAINTENANCE_PERIOD = std::chrono::seconds(30); - config.AUTOMATIC_MAINTENANCE_COUNT = MAINTENANCE_LEDGER_COUNT; - } -} - clara::Opt ledgerHashParser(std::string& ledgerHash) { @@ -396,29 +348,6 @@ forceUntrustedCatchup(bool& force) "force unverified catchup"); } -clara::Opt -inMemoryParser(bool& inMemory) -{ - return clara::Opt{inMemory}["--in-memory"]( - 
"(DEPRECATED) store working ledger in memory rather than database"); -} - -clara::Opt -startAtLedgerParser(uint32_t& startAtLedger) -{ - return clara::Opt{startAtLedger, "LEDGER"}["--start-at-ledger"]( - "(DEPRECATED) start in-memory run with replay from historical ledger " - "number"); -} - -clara::Opt -startAtHashParser(std::string& startAtHash) -{ - return clara::Opt{startAtHash, "HASH"}["--start-at-hash"]( - "(DEPRECATED) start in-memory run with replay from historical ledger " - "hash"); -} - clara::Opt filterQueryParser(std::optional& filterQuery) { @@ -857,8 +786,8 @@ runCatchup(CommandLineArgs const& args) catchupArchiveParser, trustedCheckpointHashesParser(trustedCheckpointHashesFile), outputFileParser(outputFile), disableBucketGCParser(disableBucketGC), - validationParser(completeValidation), inMemoryParser(inMemory), - ledgerHashParser(hash), forceUntrustedCatchup(forceUntrusted), + validationParser(completeValidation), ledgerHashParser(hash), + forceUntrustedCatchup(forceUntrusted), metadataOutputStreamParser(stream), forceBackParser(forceBack)}, [&] { auto config = configOption.getConfig(); @@ -879,10 +808,6 @@ runCatchup(CommandLineArgs const& args) config.AUTOMATIC_MAINTENANCE_COUNT = MAINTENANCE_LEDGER_COUNT; } - // --start-at-ledger and --start-at-hash aren't allowed in catchup, - // so pass defaults values - maybeEnableInMemoryMode(config, inMemory, 0, "", - /* persistMinimalData */ false); maybeSetMetadataOutputStream(config, stream); VirtualClock clock(VirtualClock::REAL_TIME); @@ -1024,9 +949,8 @@ runWriteVerifiedCheckpointHashes(CommandLineArgs const& args) VirtualClock clock(VirtualClock::REAL_TIME); auto cfg = configOption.getConfig(); - // Set up for quick in-memory no-catchup mode. + // Set up for quick no-catchup mode. cfg.QUORUM_INTERSECTION_CHECKER = false; - cfg.setInMemoryMode(); cfg.MODE_DOES_CATCHUP = false; auto app = Application::create(clock, cfg, false); @@ -1226,25 +1150,13 @@ int runNewDB(CommandLineArgs const& args) { CommandLine::ConfigOption configOption; - bool minimalForInMemoryMode = false; - - auto minimalDBParser = [](bool& minimalForInMemoryMode) { - return clara::Opt{ - minimalForInMemoryMode}["--minimal-for-in-memory-mode"]( - "Reset the special database used only for in-memory mode (see " - "--in-memory flag"); - }; return runWithHelp(args, - {configurationParser(configOption), - minimalDBParser(minimalForInMemoryMode)}, + { + configurationParser(configOption), + }, [&] { auto cfg = configOption.getConfig(); - if (minimalForInMemoryMode) - { - cfg.DATABASE = - SecretValue{minimalDBForInMemoryMode(cfg)}; - } initializeDatabase(cfg); return 0; }); @@ -1520,18 +1432,14 @@ run(CommandLineArgs const& args) CommandLine::ConfigOption configOption; auto disableBucketGC = false; std::string stream; - bool inMemory = false; bool waitForConsensus = false; - uint32_t startAtLedger = 0; - std::string startAtHash; return runWithHelp( args, {configurationParser(configOption), disableBucketGCParser(disableBucketGC), - metadataOutputStreamParser(stream), inMemoryParser(inMemory), - waitForConsensusParser(waitForConsensus), - startAtLedgerParser(startAtLedger), startAtHashParser(startAtHash)}, + metadataOutputStreamParser(stream), + waitForConsensusParser(waitForConsensus)}, [&] { Config cfg; std::shared_ptr clock; @@ -1549,14 +1457,10 @@ run(CommandLineArgs const& args) { cfg.DATABASE = SecretValue{"sqlite3://:memory:"}; cfg.MODE_STORES_HISTORY_MISC = false; - cfg.MODE_USES_IN_MEMORY_LEDGER = false; cfg.MODE_ENABLES_BUCKETLIST = false; 
                cfg.PREFETCH_BATCH_SIZE = 0;
            }

-            maybeEnableInMemoryMode(cfg, inMemory, startAtLedger,
-                                    startAtHash,
-                                    /* persistMinimalData */ true);
             maybeSetMetadataOutputStream(cfg, stream);
             cfg.FORCE_SCP = cfg.NODE_IS_VALIDATOR ? !waitForConsensus : false;
@@ -1600,7 +1504,7 @@
             // Note that when in in-memory mode, additional setup may be
             // required (such as database reset, catchup, etc)
             clock = std::make_shared<VirtualClock>(clockMode);
-            app = setupApp(cfg, *clock, startAtLedger, startAtHash);
+            app = setupApp(cfg, *clock);
             if (!app)
             {
                 LOG_ERROR(DEFAULT_LOG,
diff --git a/src/main/Config.cpp b/src/main/Config.cpp
index 58c1eb8dc8..8c67bce4fb 100644
--- a/src/main/Config.cpp
+++ b/src/main/Config.cpp
@@ -121,7 +121,6 @@ Config::Config() : NODE_SEED(SecretKey::random())
     // non configurable
     MODE_ENABLES_BUCKETLIST = true;
-    MODE_USES_IN_MEMORY_LEDGER = false;
     MODE_STORES_HISTORY_MISC = true;
     MODE_STORES_HISTORY_LEDGERHEADERS = true;
     MODE_DOES_CATCHUP = true;
@@ -163,11 +162,9 @@
     CATCHUP_RECENT = 0;
     EXPERIMENTAL_PRECAUTION_DELAY_META = false;
     EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING = false;
-    DEPRECATED_SQL_LEDGER_STATE = false;
     BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT = 14; // 2^14 == 16 kb
     BUCKETLIST_DB_INDEX_CUTOFF = 20;             // 20 mb
     BUCKETLIST_DB_PERSIST_INDEX = true;
-    BACKGROUND_EVICTION_SCAN = true;
     PUBLISH_TO_ARCHIVE_DELAY = std::chrono::seconds{0};
     // automatic maintenance settings:
     // short and prime with 1 hour which will cause automatic maintenance to
@@ -310,6 +307,7 @@

 #ifdef BUILD_TESTS
     TEST_CASES_ENABLED = false;
+    MODE_USES_IN_MEMORY_LEDGER = false;
 #endif

 #ifdef BEST_OFFER_DEBUGGING
@@ -1069,28 +1067,37 @@ Config::processConfig(std::shared_ptr<cpptoml::table> t)
                 EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING = readBool(item);
             }},
+            // TODO: Flags are no longer supported, remove in next release.
            {"BACKGROUND_EVICTION_SCAN",
-             [&]() { BACKGROUND_EVICTION_SCAN = readBool(item); }},
-            // TODO: Flag is no longer supported, remove in next release.
+             [&]() {
+                 CLOG_WARNING(
+                     Bucket,
+                     "BACKGROUND_EVICTION_SCAN is deprecated and ignored. "
+                     "Please remove this from config");
+             }},
            {"EXPERIMENTAL_BACKGROUND_EVICTION_SCAN",
             [&]() {
                 CLOG_WARNING(
                     Bucket,
                     "EXPERIMENTAL_BACKGROUND_EVICTION_SCAN is deprecated "
                     "and "
-                    "is ignored. Use BACKGROUND_EVICTION_SCAN instead");
+                    "is ignored. Please remove from config");
             }},
            {"DEPRECATED_SQL_LEDGER_STATE",
-            [&]() { DEPRECATED_SQL_LEDGER_STATE = readBool(item); }},
+            [&]() {
+                CLOG_WARNING(
+                    Bucket,
+                    "DEPRECATED_SQL_LEDGER_STATE is deprecated and "
+                    "ignored. Please remove from config");
+            }},
            // Still support EXPERIMENTAL_BUCKETLIST_DB* flags for
            // captive-core for 21.0 release, remove in 21.1 release
            {"EXPERIMENTAL_BUCKETLIST_DB",
             [&]() {
-                DEPRECATED_SQL_LEDGER_STATE = !readBool(item);
                 CLOG_WARNING(
                     Bucket,
-                    "EXPERIMENTAL_BUCKETLIST_DB flag is deprecated, "
-                    "use DEPRECATED_SQL_LEDGER_STATE=false instead.");
+                    "EXPERIMENTAL_BUCKETLIST_DB flag is deprecated. "
+                    "Please remove from config");
             }},
            {"EXPERIMENTAL_BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT",
             [&]() {
@@ -1568,8 +1575,8 @@ Config::processConfig(std::shared_ptr<cpptoml::table> t)
             }},
            {"TESTING_STARTING_EVICTION_SCAN_LEVEL",
             [&]() {
-                TESTING_STARTING_EVICTION_SCAN_LEVEL =
-                    readInt<uint32_t>(item, 1, BucketList::kNumLevels - 1);
+                TESTING_STARTING_EVICTION_SCAN_LEVEL = readInt<uint32_t>(
+                    item, 1, LiveBucketList::kNumLevels - 1);
             }},
            {"TESTING_MAX_ENTRIES_TO_ARCHIVE",
             [&]() {
@@ -1610,6 +1617,16 @@ Config::processConfig(std::shared_ptr<cpptoml::table> t)
            [&]() {
                EMIT_SOROBAN_TRANSACTION_META_EXT_V1 = readBool(item);
            }},
+#ifdef BUILD_TESTS
+           {"ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS",
+            [&]() {
+                ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS = readBool(item);
+            }},
+           {"REQUIRE_PROOFS_FOR_ALL_EVICTED_ENTRIES",
+            [&]() {
+                REQUIRE_PROOFS_FOR_ALL_EVICTED_ENTRIES = readBool(item);
+            }},
+#endif
            {"EMIT_LEDGER_CLOSE_META_EXT_V1",
             [&]() { EMIT_LEDGER_CLOSE_META_EXT_V1 = readBool(item); }}};
@@ -1689,33 +1706,11 @@
        // Validators default to starting the network from local state
        FORCE_SCP = NODE_IS_VALIDATOR;

-       // Require either DEPRECATED_SQL_LEDGER_STATE or
-       // EXPERIMENTAL_BUCKETLIST_DB to be backwards compatible with horizon
-       // and RPC, but do not allow both.
-       if (!t->contains("DEPRECATED_SQL_LEDGER_STATE") &&
-           !t->contains("EXPERIMENTAL_BUCKETLIST_DB"))
-       {
-           std::string msg =
-               "Invalid configuration: "
-               "DEPRECATED_SQL_LEDGER_STATE not set. Default setting is FALSE "
-               "and is appropriate for most nodes.";
-           throw std::runtime_error(msg);
-       }
        // Only allow one version of all BucketListDB flags, either the
        // deprecated flag or new flag, but not both.
-       else if (t->contains("DEPRECATED_SQL_LEDGER_STATE") &&
-                t->contains("EXPERIMENTAL_BUCKETLIST_DB"))
-       {
-           std::string msg =
-               "Invalid configuration: EXPERIMENTAL_BUCKETLIST_DB and "
-               "DEPRECATED_SQL_LEDGER_STATE must not both be set. 
" - "EXPERIMENTAL_BUCKETLIST_DB is deprecated, use " - "DEPRECATED_SQL_LEDGER_STATE only."; - throw std::runtime_error(msg); - } - else if (t->contains( - "EXPERIMENTAL_BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT") && - t->contains("BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT")) + if (t->contains( + "EXPERIMENTAL_BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT") && + t->contains("BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT")) { std::string msg = "Invalid configuration: " @@ -1748,14 +1743,6 @@ Config::processConfig(std::shared_ptr t) throw std::runtime_error(msg); } - // If DEPRECATED_SQL_LEDGER_STATE is set to false and - // BACKGROUND_EVICTION_SCAN is not set, override default value to false - // so that nodes still running SQL ledger don't crash on startup - if (!isUsingBucketListDB() && !t->contains("BACKGROUND_EVICTION_SCAN")) - { - BACKGROUND_EVICTION_SCAN = false; - } - // process elements that potentially depend on others if (t->contains("VALIDATORS")) { @@ -2263,54 +2250,12 @@ Config::getExpectedLedgerCloseTime() const return Herder::EXP_LEDGER_TIMESPAN_SECONDS; } -void -Config::setInMemoryMode() -{ - MODE_USES_IN_MEMORY_LEDGER = true; - DATABASE = SecretValue{"sqlite3://:memory:"}; - MODE_STORES_HISTORY_MISC = false; - MODE_STORES_HISTORY_LEDGERHEADERS = false; - MODE_ENABLES_BUCKETLIST = true; - BACKGROUND_EVICTION_SCAN = false; -} - bool Config::modeDoesCatchupWithBucketList() const { return MODE_DOES_CATCHUP && MODE_ENABLES_BUCKETLIST; } -bool -Config::isInMemoryMode() const -{ - return MODE_USES_IN_MEMORY_LEDGER; -} - -bool -Config::isUsingBucketListDB() const -{ - return !DEPRECATED_SQL_LEDGER_STATE && !MODE_USES_IN_MEMORY_LEDGER && - MODE_ENABLES_BUCKETLIST; -} - -bool -Config::isUsingBackgroundEviction() const -{ - return isUsingBucketListDB() && BACKGROUND_EVICTION_SCAN; -} - -bool -Config::isPersistingBucketListDBIndexes() const -{ - return isUsingBucketListDB() && BUCKETLIST_DB_PERSIST_INDEX; -} - -bool -Config::isInMemoryModeWithoutMinimalDB() const -{ - return MODE_USES_IN_MEMORY_LEDGER && !MODE_STORES_HISTORY_LEDGERHEADERS; -} - bool Config::modeStoresAllHistory() const { diff --git a/src/main/Config.h b/src/main/Config.h index 600a349259..f8685a2b72 100644 --- a/src/main/Config.h +++ b/src/main/Config.h @@ -143,31 +143,24 @@ class Config : public std::enable_shared_from_this // via applying valid TXs or manually adding entries to the BucketList. // BucketList state is not preserved over restarts. If this mode can be // used, it should be. - // 2. TESTDB_IN_MEMORY_NO_OFFERS: allows arbitrary ledger state writes via - // ltx root commits, but does not test the offers table. Suitable for + // 2. TESTDB_IN_MEMORY: allows arbitrary ledger state writes via + // ltx root commits. Suitable for // tests that required writes to the ledger state that cannot be achieved // via valid TX application, such as testing invalid TX error codes or // low level op testing. - // 3. TESTDB_IN_MEMORY_OFFERS: The same as TESTDB_IN_MEMORY_NO_OFFERS, but - // tests the offers table. Suitable for testing ops that interact with - // offers. - // 4. TESTDB_ON_DISK_SQLITE: Should only be used to test SQLITE specific + // 3. TESTDB_POSTGRESQL: Should only be used to test POSTGRESQL specific // database operations. - // 5. TESTDB_POSTGRESQL: Should only be used to test POSTGRESQL specific - // database operations. - // 6. TESTDB_BUCKET_DB_PERSISTENT: Same as TESTDB_BUCKET_DB_VOLATILE, but - // persists the BucketList over restart. 
This mode is very slow and
-    //    should only be used for testing restart behavior or some low level
-    //    BucketList features.
+    // 4. TESTDB_BUCKET_DB_PERSISTENT: Same as TESTDB_BUCKET_DB_VOLATILE, but
+    //    persists the BucketList and SQL DB over restart. This mode is very
+    //    slow and should only be used for testing restart behavior, low-level
+    //    BucketList features, or SQLite-specific DB behavior.
     enum TestDbMode
     {
         TESTDB_DEFAULT,
-        TESTDB_IN_MEMORY_OFFERS,
-        TESTDB_ON_DISK_SQLITE,
+        TESTDB_IN_MEMORY,
 #ifdef USE_POSTGRES
         TESTDB_POSTGRESQL,
 #endif
-        TESTDB_IN_MEMORY_NO_OFFERS,
         TESTDB_BUCKET_DB_VOLATILE,
         TESTDB_BUCKET_DB_PERSISTENT,
         TESTDB_MODES
@@ -235,9 +228,9 @@ class Config : public std::enable_shared_from_this<Config>
     bool ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING;

     // A config parameter that avoids counting level 0 merge events and those
-    // within Bucket::fresh; this option exists only for calculating adjustments
-    // to the expected count of merges when stopping and resuming merges,
-    // and should be false in all normal cases.
+    // within LiveBucket::fresh; this option exists only for calculating
+    // adjustments to the expected count of merges when stopping and resuming
+    // merges, and should be false in all normal cases.
     bool ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING;

     // A config parameter that skips adjustment of target outbound connections
@@ -377,20 +370,10 @@ class Config : public std::enable_shared_from_this<Config>
     // be set to `false` only for testing purposes.
     bool MODE_ENABLES_BUCKETLIST;

-    // A config parameter that uses a never-committing ledger. This means that
-    // all ledger entries will be kept in memory, and not persisted to DB
-    // (relevant tables won't even be created). This should not be set for
-    // production validators.
-    bool MODE_USES_IN_MEMORY_LEDGER;
-
     // A config parameter that can be set to true (in a captive-core
     // configuration) to delay emitting metadata by one ledger.
     bool EXPERIMENTAL_PRECAUTION_DELAY_META;

-    // A config parameter that when set uses SQL as the primary
-    // key-value store for LedgerEntry lookups instead of BucketListDB.
-    bool DEPRECATED_SQL_LEDGER_STATE;
-
     // Page size exponent used by BucketIndex when indexing ranges of
     // BucketEntry's. If set to 0, BucketEntry's are individually indexed.
     // Otherwise, pageSize ==
@@ -415,10 +398,6 @@ class Config : public std::enable_shared_from_this<Config>
     // persisted.
     bool BUCKETLIST_DB_PERSIST_INDEX;

-    // When set to true, eviction scans occur on the background thread,
-    // increasing performance. Requires EXPERIMENTAL_BUCKETLIST_DB.
-    bool BACKGROUND_EVICTION_SCAN;
-
     // A config parameter that stores historical data, such as transactions,
     // fees, and scp history in the database
     bool MODE_STORES_HISTORY_MISC;
@@ -698,6 +677,20 @@ class Config : public std::enable_shared_from_this<Config>
     // doing a graceful shutdown
     bool TEST_CASES_ENABLED;

+    // A config parameter that uses a never-committing ledger. This means that
+    // all ledger entries will be kept in memory, and not persisted to DB.
+    // Should only be used for testing.
+    bool MODE_USES_IN_MEMORY_LEDGER;
+
+    // When set, any CONTRACT_DATA entry with the key "miss" will act as if an
+    // archival filter miss has occurred.
+    bool ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS{false};
+
+    // When set, persistent entries require proofs immediately after eviction.
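+    // (Illustrative note based on the QueryServer handlers elsewhere in this
+    // patch: with this flag set, getledgerentry reports an evicted persistent
+    // entry as "archived_proof" instead of "archived_no_proof".)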
+ // Note that proofs are still not required if an entry is just expired but + // not yet evicted + bool REQUIRE_PROOFS_FOR_ALL_EVICTED_ENTRIES{false}; + // Set QUORUM_SET using automatic quorum set configuration based on // `validators`. void @@ -730,10 +723,7 @@ class Config : public std::enable_shared_from_this std::chrono::seconds getExpectedLedgerCloseTime() const; - void setInMemoryMode(); bool modeDoesCatchupWithBucketList() const; - bool isInMemoryMode() const; - bool isInMemoryModeWithoutMinimalDB() const; bool isUsingBucketListDB() const; bool isUsingBackgroundEviction() const; bool isPersistingBucketListDBIndexes() const; diff --git a/src/main/PersistentState.cpp b/src/main/PersistentState.cpp index 150d3f62ab..ecb7c12eaa 100644 --- a/src/main/PersistentState.cpp +++ b/src/main/PersistentState.cpp @@ -149,32 +149,24 @@ PersistentState::setSCPStateV1ForSlot( } bool -PersistentState::shouldRebuildForType(LedgerEntryType let) +PersistentState::shouldRebuildForOfferTable() { ZoneScoped; - return !getFromDb(getStoreStateName(kRebuildLedger, let)).empty(); + return !getFromDb(getStoreStateName(kRebuildLedger, OFFER)).empty(); } void -PersistentState::clearRebuildForType(LedgerEntryType let) +PersistentState::clearRebuildForOfferTable() { ZoneScoped; - updateDb(getStoreStateName(kRebuildLedger, let), ""); + updateDb(getStoreStateName(kRebuildLedger, OFFER), ""); } void -PersistentState::setRebuildForType(LedgerEntryType let) +PersistentState::setRebuildForOfferTable() { ZoneScoped; - - // Only allow rebuilds for offer table if BucketListDB enabled, other tables - // don't exist - if (mApp.getConfig().isUsingBucketListDB() && let != OFFER) - { - return; - } - - updateDb(getStoreStateName(kRebuildLedger, let), "1"); + updateDb(getStoreStateName(kRebuildLedger, OFFER), "1"); } void diff --git a/src/main/PersistentState.h b/src/main/PersistentState.h index c22cd59e57..7dc359ae2e 100644 --- a/src/main/PersistentState.h +++ b/src/main/PersistentState.h @@ -46,9 +46,9 @@ class PersistentState setSCPStateV1ForSlot(uint64 slot, std::string const& value, std::unordered_map const& txSets); - bool shouldRebuildForType(LedgerEntryType let); - void clearRebuildForType(LedgerEntryType let); - void setRebuildForType(LedgerEntryType let); + bool shouldRebuildForOfferTable(); + void clearRebuildForOfferTable(); + void setRebuildForOfferTable(); bool hasTxSet(Hash const& txSetHash); void deleteTxSets(std::unordered_set hashesToDelete); diff --git a/src/main/QueryServer.cpp b/src/main/QueryServer.cpp index 97657105a1..ec05ad1af4 100644 --- a/src/main/QueryServer.cpp +++ b/src/main/QueryServer.cpp @@ -6,8 +6,13 @@ #include "bucket/BucketListSnapshot.h" #include "bucket/BucketSnapshotManager.h" #include "ledger/LedgerTxnImpl.h" +#include "ledger/LedgerTypeUtils.h" +#include "main/Config.h" +#include "util/ArchivalProofs.h" #include "util/Logging.h" +#include "util/UnorderedSet.h" #include "util/XDRStream.h" // IWYU pragma: keep +#include "util/types.h" #include #include @@ -54,20 +59,35 @@ namespace stellar { QueryServer::QueryServer(const std::string& address, unsigned short port, int maxClient, size_t threadPoolSize, - BucketSnapshotManager& bucketSnapshotManager) + BucketSnapshotManager& bucketSnapshotManager +#ifdef BUILD_TESTS + , + Config const& cfg +#endif + ) : mServer(address, port, maxClient, threadPoolSize) +#ifdef BUILD_TESTS + , mRequireProofsForAllEvictedEntries( + cfg.REQUIRE_PROOFS_FOR_ALL_EVICTED_ENTRIES) + , mSimulateFilterMiss(cfg.ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS) +#endif { 
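    // Each worker thread owns its snapshots: the maps populated below are
    // keyed by worker thread id, and the route handlers look up their entry
    // via std::this_thread::get_id(), so no snapshot is shared across threads.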
LOG_INFO(DEFAULT_LOG, "Listening on {}:{} for Query requests", address, port); mServer.add404(std::bind(&QueryServer::notFound, this, _1, _2, _3)); addRoute("getledgerentryraw", &QueryServer::getLedgerEntryRaw); + addRoute("getledgerentry", &QueryServer::getLedgerEntry); + addRoute("getrestoreproof", &QueryServer::getRestoreProof); + addRoute("getcreationproof", &QueryServer::getCreationProof); auto workerPids = mServer.start(); for (auto pid : workerPids) { mBucketListSnapshots[pid] = - std::move(bucketSnapshotManager.copySearchableBucketListSnapshot()); + bucketSnapshotManager.copySearchableLiveBucketListSnapshot(); + mHotArchiveBucketListSnapshots[pid] = + bucketSnapshotManager.copySearchableHotArchiveBucketListSnapshot(); } } @@ -149,16 +169,17 @@ QueryServer::getLedgerEntryRaw(std::string const& params, { root["ledgerSeq"] = *snapshotLedger; - bool snapshotExists; - std::tie(loadedKeys, snapshotExists) = + auto loadedKeysOp = bl.loadKeysFromLedger(orderedKeys, *snapshotLedger); // Return 404 if ledgerSeq not found - if (!snapshotExists) + if (!loadedKeysOp) { retStr = "LedgerSeq not found"; return false; } + + loadedKeys = std::move(*loadedKeysOp); } // Otherwise default to current ledger else @@ -184,4 +205,302 @@ QueryServer::getLedgerEntryRaw(std::string const& params, retStr = Json::FastWriter().write(root); return true; } + +bool +QueryServer::getLedgerEntry(std::string const& params, std::string const& body, + std::string& retStr) +{ + ZoneScoped; + Json::Value root; + + std::map> paramMap; + httpThreaded::server::server::parsePostParams(body, paramMap); + + auto keys = paramMap["key"]; + auto snapshotLedger = parseOptionalParam(paramMap, "ledgerSeq"); + + if (keys.empty()) + { + throw std::invalid_argument( + "Must specify ledger key in POST body: key="); + } + + // First query LiveBucketList for the entries and TTLs + auto& bl = *mBucketListSnapshots.at(std::this_thread::get_id()); + LedgerKeySet orderedKeys; + for (auto const& key : keys) + { + LedgerKey k; + fromOpaqueBase64(k, key); + if (k.type() == TTL) + { + throw std::invalid_argument( + "Must not query TTL keys. 
For live BucketList key-value " + "lookup use getledgerentryraw"); + } + + orderedKeys.emplace(k); + if (isSorobanEntry(k)) + { + orderedKeys.emplace(getTTLKey(k)); + } + } + + std::vector loadedLiveKeys; + uint32_t ledgerSeq; + + // If a snapshot ledger is specified, use it to get the ledger entry + if (snapshotLedger) + { + ledgerSeq = *snapshotLedger; + + auto loadedKeysOp = bl.loadKeysFromLedger(orderedKeys, *snapshotLedger); + + // Return 404 if ledgerSeq not found + if (!loadedKeysOp) + { + retStr = "LedgerSeq not found"; + return false; + } + + loadedLiveKeys = std::move(*loadedKeysOp); + } + // Otherwise default to current ledger and use stable ledgerSeq for + // later calls + else + { + loadedLiveKeys = + bl.loadKeysWithLimits(orderedKeys, /*lkMeter=*/nullptr); + ledgerSeq = bl.getLedgerSeq(); + } + + root["ledgerSeq"] = ledgerSeq; + + UnorderedMap ttlEntries; + UnorderedMap liveEntries; + UnorderedSet deadOrArchivedEntries; + for (auto const& le : loadedLiveKeys) + { + if (le.data.type() == TTL) + { + ttlEntries.emplace(LedgerEntryKey(le), le); + } + else + { + liveEntries.emplace(LedgerEntryKey(le), le); + } + } + + for (auto const& key : orderedKeys) + { + if (key.type() != TTL && liveEntries.find(key) == liveEntries.end()) + { + deadOrArchivedEntries.emplace(key); + } + } + + // First process entries from the LiveBucketList + for (auto const& [lk, le] : liveEntries) + { + Json::Value entry; + entry["e"] = toOpaqueBase64(le); + if (!isSorobanEntry(le.data)) + { + entry["state"] = "live"; + } + else + { + auto const& ttl = ttlEntries.at(getTTLKey(lk)); + if (isLive(ttl, ledgerSeq)) + { + entry["state"] = "live"; + } + // Dead entry, temp never require a proof + else if (isTemporaryEntry(le.data)) + { + entry["state"] = "new_entry_no_proof"; + } + // Archived but not yet evicted entries do not require proofs + else + { + entry["state"] = "archived_no_proof"; + } + } + + root["entries"].append(entry); + } + + // Next process all keys not found in live BucketList + LedgerKeySet archivedOrNewSorobanKeys; + for (auto const& key : deadOrArchivedEntries) + { + + // Classic and temp never require proofs + if (isSorobanEntry(key) && !isTemporaryEntry(key)) + { + archivedOrNewSorobanKeys.emplace(key); + } + else + { + Json::Value entry; + entry["e"] = toOpaqueBase64(key); + entry["state"] = "new_entry_no_proof"; + root["entries"].append(entry); + } + } + + // Search Hot Archive for remaining persistent keys + auto& hotBL = mHotArchiveBucketListSnapshots.at(std::this_thread::get_id()); + auto loadedHotArchiveEntriesOp = + hotBL->loadKeysFromLedger(archivedOrNewSorobanKeys, ledgerSeq); + + if (!loadedHotArchiveEntriesOp) + { + retStr = "LedgerSeq not found"; + return false; + } + + // Process entries currently marked as archived in the Hot Archive + for (auto const& be : *loadedHotArchiveEntriesOp) + { + if (be.type() == HOT_ARCHIVE_ARCHIVED) + { + auto const& le = be.archivedEntry(); + Json::Value entry; + entry["e"] = toOpaqueBase64(le); + + if (mRequireProofsForAllEvictedEntries) + { + entry["state"] = "archived_proof"; + } + else + { + + entry["state"] = "archived_no_proof"; + } + root["entries"].append(entry); + archivedOrNewSorobanKeys.erase(LedgerEntryKey(le)); + } + } + + // At this point all entries remaining in archivedOrNewSorobanKeys are + // persistent entries that do not exist + for (auto const& key : archivedOrNewSorobanKeys) + { + Json::Value entry; + entry["e"] = toOpaqueBase64(key); + +#ifdef BUILD_TESTS + if (mSimulateFilterMiss) + { + if (key.type() == CONTRACT_DATA && + 
key.contractData().key.type() == SCV_SYMBOL && + key.contractData().key.sym() == "miss") + { + entry["state"] = "new_entry_proof"; + } + else + { + entry["state"] = "new_entry_no_proof"; + } + } + else +#endif + entry["state"] = "new_entry_no_proof"; + + root["entries"].append(entry); + } + + retStr = Json::FastWriter().write(root); + return true; +} + +bool +QueryServer::getRestoreProof(std::string const& params, std::string const& body, + std::string& retStr) +{ + ZoneScoped; + Json::Value root; + + std::map> paramMap; + httpThreaded::server::server::parsePostParams(body, paramMap); + + auto keys = paramMap["key"]; + auto snapshotLedger = parseOptionalParam(paramMap, "ledgerSeq"); + if (keys.empty()) + { + throw std::invalid_argument( + "Must specify ledger key in POST body: key="); + } + + xdr::xvector proof; + auto& hotBL = mHotArchiveBucketListSnapshots.at(std::this_thread::get_id()); + for (auto const& key : keys) + { + LedgerKey lk; + fromOpaqueBase64(lk, key); + if (!isPersistentEntry(lk)) + { + throw std::invalid_argument( + "Only persistent entries require restoration proofs"); + } + + if (!addRestorationProof(hotBL, lk, proof, snapshotLedger)) + { + throw std::invalid_argument("No valid proof exists for key"); + } + } + + root["ledger"] = hotBL->getLedgerSeq(); + root["proof"] = toOpaqueBase64(proof); + + retStr = Json::FastWriter().write(root); + return true; +} + +bool +QueryServer::getCreationProof(std::string const& params, + std::string const& body, std::string& retStr) +{ + ZoneScoped; + Json::Value root; + + std::map> paramMap; + httpThreaded::server::server::parsePostParams(body, paramMap); + + auto keys = paramMap["key"]; + auto snapshotLedger = parseOptionalParam(paramMap, "ledgerSeq"); + if (keys.empty()) + { + throw std::invalid_argument( + "Must specify ledger key in POST body: key="); + } + + auto& hotBL = mHotArchiveBucketListSnapshots.at(std::this_thread::get_id()); + xdr::xvector proof; + for (auto const& key : keys) + { + LedgerKey lk; + fromOpaqueBase64(lk, key); + if (!isPersistentEntry(lk) || lk.type() != CONTRACT_DATA) + { + throw std::invalid_argument("Only persistent contract data entries " + "require creation proofs"); + } + + if (!addCreationProof(mSimulateFilterMiss, lk, proof)) + { + throw std::invalid_argument("No valid proof exists for key"); + } + } + + root["ledger"] = snapshotLedger ? 
*snapshotLedger : hotBL->getLedgerSeq();
+    root["proof"] = toOpaqueBase64(proof);
+
+    retStr = Json::FastWriter().write(root);
+    return true;
+}
 }
\ No newline at end of file
diff --git a/src/main/QueryServer.h b/src/main/QueryServer.h
index f16a79c945..ba97877377 100644
--- a/src/main/QueryServer.h
+++ b/src/main/QueryServer.h
@@ -5,6 +5,7 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0

 #include "lib/httpthreaded/server.hpp"
+#include "main/Config.h"
 #include
 #include
@@ -14,7 +15,8 @@
 namespace stellar
 {

-class SearchableBucketListSnapshot;
+class SearchableLiveBucketListSnapshot;
+class SearchableHotArchiveBucketListSnapshot;
 class BucketSnapshotManager;

 class QueryServer
@@ -26,8 +28,16 @@ class QueryServer
     httpThreaded::server::server mServer;

     std::unordered_map<std::thread::id,
-                       std::shared_ptr<SearchableBucketListSnapshot>>
+                       std::shared_ptr<SearchableLiveBucketListSnapshot>>
         mBucketListSnapshots;
+    std::unordered_map<
+        std::thread::id,
+        std::shared_ptr<SearchableHotArchiveBucketListSnapshot>>
+        mHotArchiveBucketListSnapshots;
+
+#ifdef BUILD_TESTS
+    bool const mRequireProofsForAllEvictedEntries;
+    bool const mSimulateFilterMiss;
+#endif

     bool safeRouter(HandlerRoute route, std::string const& params,
                     std::string const& body, std::string& retStr);
@@ -37,14 +47,35 @@ class QueryServer

     void addRoute(std::string const& name, HandlerRoute route);

-    // Returns raw LedgerKeys for the given keys from the Live BucketList. Does
-    // not query other BucketLists or reason about archival.
+    // Returns raw LedgerEntries for the given keys from the Live BucketList.
+    // Does not query other BucketLists or reason about archival.
     bool getLedgerEntryRaw(std::string const& params, std::string const& body,
                           std::string& retStr);

+    // Returns LedgerEntries for the given keys in addition to archival state.
+    // This function may query BucketLists in addition to the Live BucketList,
+    // query archival filters, etc. to provide complete information about the
+    // given LedgerKey.
+    bool getLedgerEntry(std::string const& params, std::string const& body,
+                        std::string& retStr);
+
+    // Returns restoration/creation proofs for the given set of keys.
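+    // (Request shape, inferred from the handler implementations and not
+    // normative: POST body "key=<base64 XDR LedgerKey>", repeatable, plus an
+    // optional "ledgerSeq"; the JSON response carries "ledger" and "proof".)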
+ bool getRestoreProof(std::string const& params, std::string const& body, + std::string& retStr); + + // Test string for artificial miss: + // AAAABgAAAAEBuCkP1Jtb0TMPDqcYvepnKTIGIPZc4adjZXxXY4xYCwAAAA8AAAAEbWlzcwAAAAE= + bool getCreationProof(std::string const& params, std::string const& body, + std::string& retStr); + public: QueryServer(const std::string& address, unsigned short port, int maxClient, size_t threadPoolSize, - BucketSnapshotManager& bucketSnapshotManager); + BucketSnapshotManager& bucketSnapshotManager +#ifdef BUILD_TESTS + , + Config const& cfg +#endif + ); }; } \ No newline at end of file diff --git a/src/main/SettingsUpgradeUtils.cpp b/src/main/SettingsUpgradeUtils.cpp index 1b241dadf2..298b3702a7 100644 --- a/src/main/SettingsUpgradeUtils.cpp +++ b/src/main/SettingsUpgradeUtils.cpp @@ -13,6 +13,10 @@ getWasmRestoreTx(PublicKey const& publicKey, SequenceNumber seqNum) TransactionEnvelope txEnv; txEnv.type(ENVELOPE_TYPE_TX); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + txEnv.v1().tx.ext.sorobanData().ext.v(1); +#endif + auto& tx = txEnv.v1().tx; tx.sourceAccount = toMuxedAccount(publicKey); tx.fee = 100'000'000; @@ -49,6 +53,10 @@ getWasmRestoreTx(PublicKey const& publicKey, SequenceNumber seqNum) tx.ext.sorobanData().resources = restoreResources; tx.ext.sorobanData().resourceFee = 55'000'000; +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + tx.ext.sorobanData().ext.v(1); +#endif + return {txEnv, contractCodeLedgerKey}; } @@ -96,6 +104,10 @@ getUploadTx(PublicKey const& publicKey, SequenceNumber seqNum) tx.ext.sorobanData().resources = uploadResources; tx.ext.sorobanData().resourceFee = 55'000'000; +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + tx.ext.sorobanData().ext.v(1); +#endif + return {txEnv, contractCodeLedgerKey}; } @@ -180,6 +192,10 @@ getCreateTx(PublicKey const& publicKey, LedgerKey const& contractCodeLedgerKey, tx.ext.sorobanData().resources = uploadResources; tx.ext.sorobanData().resourceFee = 15'000'000; +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + tx.ext.sorobanData().ext.v(1); +#endif + return {txEnv, contractSourceRefLedgerKey, contractID}; } @@ -274,6 +290,10 @@ getInvokeTx(PublicKey const& publicKey, LedgerKey const& contractCodeLedgerKey, tx.ext.sorobanData().resources = invokeResources; tx.ext.sorobanData().resourceFee = 65'000'000; +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + tx.ext.sorobanData().ext.v(1); +#endif + ConfigUpgradeSetKey key; key.contentHash = upgradeHash; key.contractID = contractID; diff --git a/src/main/main.cpp b/src/main/main.cpp index 6b68a0f5db..9f6c5edccb 100644 --- a/src/main/main.cpp +++ b/src/main/main.cpp @@ -385,7 +385,8 @@ main(int argc, char* const* argv) checkStellarCoreMajorVersionProtocolIdentity(); rust_bridge::check_sensible_soroban_config_for_protocol( Config::CURRENT_LEDGER_PROTOCOL_VERSION); - checkXDRFileIdentity(); + // TODO: Add back when rs-stellar-xdr lands + // checkXDRFileIdentity(); int res = handleCommandLine(argc, argv); #ifdef USE_TRACY diff --git a/src/main/test/ApplicationUtilsTests.cpp b/src/main/test/ApplicationUtilsTests.cpp index bc3e1e8451..31ee9dbba9 100644 --- a/src/main/test/ApplicationUtilsTests.cpp +++ b/src/main/test/ApplicationUtilsTests.cpp @@ -2,11 +2,13 @@ // under the Apache License, Version 2.0. 
See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "bucket/test/BucketTestUtils.h" #include "crypto/Random.h" #include "history/HistoryArchiveManager.h" #include "history/test/HistoryTestsUtils.h" #include "invariant/BucketListIsConsistentWithDatabase.h" #include "ledger/LedgerTxn.h" +#include "ledger/test/LedgerTestUtils.h" #include "lib/catch.hpp" #include "main/Application.h" #include "main/ApplicationUtils.h" @@ -51,45 +53,6 @@ class TemporaryFileDamager } }; -class TemporarySQLiteDBDamager : public TemporaryFileDamager -{ - Config mConfig; - static std::filesystem::path - getSQLiteDBPath(Config const& cfg) - { - auto str = cfg.DATABASE.value; - std::string prefix = "sqlite3://"; - REQUIRE(str.find(prefix) == 0); - str = str.substr(prefix.size()); - REQUIRE(!str.empty()); - std::filesystem::path path(str); - REQUIRE(std::filesystem::exists(path)); - return path; - } - - public: - TemporarySQLiteDBDamager(Config const& cfg) - : TemporaryFileDamager(getSQLiteDBPath(cfg)), mConfig(cfg) - { - } - void - damageVictim() override - { - // Damage a database by bumping the root account's last-modified. - VirtualClock clock; - auto app = createTestApplication(clock, mConfig, /*newDB=*/false); - LedgerTxn ltx(app->getLedgerTxnRoot(), - /*shouldUpdateLastModified=*/false); - { - auto rootKey = accountKey( - stellar::txtest::getRoot(app->getNetworkID()).getPublicKey()); - auto rootLe = ltx.load(rootKey); - rootLe.current().lastModifiedLedgerSeq += 1; - } - ltx.commit(); - } -}; - // Logic to check the state of the bucket list with the state of the DB static bool checkState(Application& app) @@ -107,7 +70,7 @@ checkState(Application& app) blcOk = false; } - if (app.getConfig().isUsingBucketListDB()) + if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER) { auto checkBucket = [&blcOk](auto b) { if (!b->isEmpty() && !b->isIndexed()) @@ -120,16 +83,17 @@ checkState(Application& app) }; auto& bm = app.getBucketManager(); - for (uint32_t i = 0; i < bm.getBucketList().kNumLevels && blcOk; ++i) + for (uint32_t i = 0; i < bm.getLiveBucketList().kNumLevels && blcOk; + ++i) { - auto& level = bm.getBucketList().getLevel(i); + auto& level = bm.getLiveBucketList().getLevel(i); checkBucket(level.getCurr()); checkBucket(level.getSnap()); auto& nextFuture = level.getNext(); if (nextFuture.hasOutputHash()) { auto hash = hexToBin256(nextFuture.getOutputHash()); - checkBucket(bm.getBucketByHash(hash)); + checkBucket(bm.getLiveBucketByHash(hash)); } } } @@ -309,82 +273,6 @@ class SimulationHelper { mSimulation->removeNode(mTestNodeID); } - - void - runStartupTest(bool triggerCatchup, uint32_t startFromLedger, - std::string startFromHash, uint32_t lclLedgerSeq) - { - bool isInMemoryMode = startFromLedger != 0 && !startFromHash.empty(); - if (isInMemoryMode) - { - REQUIRE(canRebuildInMemoryLedgerFromBuckets(startFromLedger, - lclLedgerSeq)); - } - - uint32_t checkpointFrequency = 8; - - // Depending on how many ledgers we buffer during bucket - // apply, core might trim some and only keep checkpoint - // ledgers. In this case, after bucket application, normal - // catchup will be triggered. - uint32_t delayBuckets = triggerCatchup ? 
(2 * checkpointFrequency) - : (checkpointFrequency / 2); - mTestCfg.ARTIFICIALLY_DELAY_BUCKET_APPLICATION_FOR_TESTING = - std::chrono::seconds(delayBuckets); - - // Start test app - auto app = mSimulation->addNode(mTestNodeSecretKey, mQuorum, &mTestCfg, - false, startFromLedger, startFromHash); - mSimulation->addPendingConnection(mMainNodeID, mTestNodeID); - REQUIRE(app); - mSimulation->startAllNodes(); - - // Ensure nodes are connected - if (!app->getConfig().MODE_AUTO_STARTS_OVERLAY) - { - app->getOverlayManager().start(); - } - - if (isInMemoryMode) - { - REQUIRE(app->getLedgerManager().getState() == - LedgerManager::LM_CATCHING_UP_STATE); - } - - auto downloaded = - app->getCatchupManager().getCatchupMetrics().mCheckpointsDownloaded; - - Upgrades::UpgradeParameters scheduledUpgrades; - scheduledUpgrades.mUpgradeTime = - VirtualClock::from_time_t(mMainNode->getLedgerManager() - .getLastClosedLedgerHeader() - .header.scpValue.closeTime); - scheduledUpgrades.mProtocolVersion = - static_cast(SOROBAN_PROTOCOL_VERSION); - mMainNode->getHerder().setUpgrades(scheduledUpgrades); - - generateLoad(false); - generateLoad(true); - - // State has been rebuilt and node is properly in sync - REQUIRE(checkState(*app)); - REQUIRE(app->getLedgerManager().getLastClosedLedgerNum() == - getMainNodeLCL().header.ledgerSeq); - REQUIRE(app->getLedgerManager().isSynced()); - - if (triggerCatchup) - { - REQUIRE(downloaded < app->getCatchupManager() - .getCatchupMetrics() - .mCheckpointsDownloaded); - } - else - { - REQUIRE(downloaded == app->getCatchupManager() - .getCatchupMetrics() - .mCheckpointsDownloaded); - } - } }; TEST_CASE("verify checkpoints command - wait condition", "[applicationutils]") @@ -401,7 +289,7 @@ TEST_CASE("verify checkpoints command - wait condition", "[applicationutils]") qSet.validators.push_back(vNode1NodeID); Config cfg1 = getTestConfig(1); - Config cfg2 = getTestConfig(2, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg2 = getTestConfig(2, Config::TESTDB_IN_MEMORY); cfg2.FORCE_SCP = false; cfg2.NODE_IS_VALIDATOR = false; cfg2.MODE_DOES_CATCHUP = false; @@ -448,12 +336,12 @@ TEST_CASE("offline self-check works", "[applicationutils][selfcheck]") // Step 2: make a new application and catch it up part-way to the // archives (but behind). auto app = catchupSimulation.createCatchupApplication( - std::numeric_limits::max(), Config::TESTDB_ON_DISK_SQLITE, - "client"); + std::numeric_limits::max(), + Config::TESTDB_BUCKET_DB_PERSISTENT, "client"); catchupSimulation.catchupOffline(app, l1); chkConfig = app->getConfig(); victimBucketPath = app->getBucketManager() - .getBucketList() + .getLiveBucketList() .getLevel(0) .getCurr() ->getFilename(); @@ -490,146 +378,14 @@ TEST_CASE("offline self-check works", "[applicationutils][selfcheck]") damage.damageVictim(); REQUIRE(selfCheck(chkConfig) == 1); } - { - // Damage the SQL ledger. 
- TemporarySQLiteDBDamager damage(chkConfig); - damage.damageVictim(); - REQUIRE(selfCheck(chkConfig) == 1); - } } TEST_CASE("application setup", "[applicationutils]") { VirtualClock clock; - - SECTION("SQL DB mode") - { - auto cfg = getTestConfig(); - auto app = setupApp(cfg, clock, 0, ""); - REQUIRE(checkState(*app)); - } - - auto testInMemoryMode = [&](Config& cfg1, Config& cfg2) { - // Publish a few checkpoints then shut down test node - auto simulation = SimulationHelper(cfg1, cfg2); - auto [startFromLedger, startFromHash] = - simulation.publishCheckpoints(2); - auto lcl = simulation.getTestNodeLCL(); - simulation.shutdownTestNode(); - - SECTION("minimal DB setup") - { - SECTION("not found") - { - // Remove `buckets` dir completely - fs::deltree(cfg2.BUCKET_DIR_PATH); - - // Initialize new minimal DB from scratch - auto app = setupApp(cfg2, clock, 0, ""); - REQUIRE(app); - REQUIRE(checkState(*app)); - } - SECTION("found") - { - // Found existing minimal DB, reset to genesis - auto app = setupApp(cfg2, clock, 0, ""); - REQUIRE(app); - REQUIRE(checkState(*app)); - } - } - SECTION("rebuild state") - { - SECTION("from buckets") - { - auto selectedLedger = lcl.header.ledgerSeq; - auto selectedHash = binToHex(lcl.hash); - - SECTION("replay buffered ledgers") - { - simulation.runStartupTest(false, selectedLedger, - selectedHash, - lcl.header.ledgerSeq); - } - SECTION("trigger catchup") - { - simulation.runStartupTest(true, selectedLedger, - selectedHash, - lcl.header.ledgerSeq); - } - SECTION("start from future ledger") - { - // Validator publishes more checkpoints while the - // captive-core instance is shutdown - auto [selectedLedger2, selectedHash2] = - simulation.publishCheckpoints(4); - simulation.runStartupTest(true, selectedLedger2, - selectedHash2, - lcl.header.ledgerSeq); - } - } - SECTION("via catchup") - { - // startAtLedger is behind LCL, reset to genesis and catchup - REQUIRE(!canRebuildInMemoryLedgerFromBuckets( - startFromLedger, lcl.header.ledgerSeq)); - auto app = - setupApp(cfg2, clock, startFromLedger, startFromHash); - REQUIRE(app); - REQUIRE(checkState(*app)); - REQUIRE(app->getLedgerManager().getLastClosedLedgerNum() == - startFromLedger); - REQUIRE(app->getLedgerManager().getState() == - LedgerManager::LM_CATCHING_UP_STATE); - } - - SECTION("bad hash") - { - // Create mismatch between start-from ledger and hash - auto app = - setupApp(cfg2, clock, startFromLedger + 1, startFromHash); - REQUIRE(!app); - } - } - SECTION("set meta stream") - { - TmpDirManager tdm(std::string("streamtmp-") + - binToHex(randomBytes(8))); - TmpDir td = tdm.tmpDir("streams"); - std::string path = td.getName() + "/stream.xdr"; - - // Remove `buckets` dir completely to ensure multiple apps are - // initialized during setup - fs::deltree(cfg2.BUCKET_DIR_PATH); - SECTION("file path") - { - cfg2.METADATA_OUTPUT_STREAM = path; - - auto app = setupApp(cfg2, clock, 0, ""); - REQUIRE(app); - REQUIRE(checkState(*app)); - } -#ifdef _WIN32 -#else - SECTION("fd") - { - int fd = ::open(path.c_str(), O_CREAT | O_WRONLY, 0644); - REQUIRE(fd != -1); - cfg2.METADATA_OUTPUT_STREAM = fmt::format("fd:{}", fd); - - auto app = setupApp(cfg2, clock, 0, ""); - REQUIRE(app); - REQUIRE(checkState(*app)); - } -#endif - } - }; - SECTION("in memory mode") - { - Config cfg1 = getTestConfig(1); - Config cfg2 = getTestConfig(2, Config::TESTDB_IN_MEMORY_NO_OFFERS); - cfg2.DATABASE = SecretValue{minimalDBForInMemoryMode(cfg2)}; - testInMemoryMode(cfg1, cfg2); - } + auto cfg = getTestConfig(); + auto app = setupApp(cfg, clock); 
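+    // checkState (defined above) verifies the bucket list against the
+    // database and that every non-empty bucket is indexed.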
+ REQUIRE(checkState(*app)); } TEST_CASE("application major version numbers", "[applicationutils]") diff --git a/src/main/test/ConfigTests.cpp b/src/main/test/ConfigTests.cpp index 29938731a9..5c5d8c8e36 100644 --- a/src/main/test/ConfigTests.cpp +++ b/src/main/test/ConfigTests.cpp @@ -286,7 +286,6 @@ TEST_CASE("bad validators configs", "[config]") NODE_SEED="SA7FGJMMUIHNE3ZPI2UO5I632A7O5FBAZTXFAIEVFA4DSSGLHXACLAIT a3" {NODE_HOME_DOMAIN} NODE_IS_VALIDATOR=true -DEPRECATED_SQL_LEDGER_STATE=true ############################ # list of HOME_DOMAINS @@ -473,9 +472,7 @@ TEST_CASE("nesting level", "[config]") auto secretKey = SecretKey::fromSeed(hash); return secretKey.getStrKeyPublic(); }; - std::string configNesting = - "DEPRECATED_SQL_LEDGER_STATE=true\n" // Required for all configs - "UNSAFE_QUORUM=true"; + std::string configNesting = "UNSAFE_QUORUM=true"; std::string quorumSetNumber = ""; std::string quorumSetTemplate = R"( @@ -536,7 +533,6 @@ TEST_CASE("operation filter configuration", "[config]") }; std::stringstream ss; - ss << "DEPRECATED_SQL_LEDGER_STATE=true\n"; // required for all configs ss << "UNSAFE_QUORUM=true\n"; toConfigStr(vals, ss); ss << "\n[QUORUM_SET]\n"; diff --git a/src/main/test/ExternalQueueTests.cpp b/src/main/test/ExternalQueueTests.cpp deleted file mode 100644 index c44713ea7f..0000000000 --- a/src/main/test/ExternalQueueTests.cpp +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2014 Stellar Development Foundation and contributors. Licensed -// under the Apache License, Version 2.0. See the COPYING file at the root -// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 - -#include "lib/catch.hpp" -#include "main/Application.h" -#include "main/CommandHandler.h" -#include "main/Config.h" -#include "main/ExternalQueue.h" -#include "simulation/Simulation.h" -#include "test/TestUtils.h" -#include "test/test.h" - -using namespace stellar; - -TEST_CASE("cursors", "[externalqueue]") -{ - VirtualClock clock; - Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE); - Application::pointer app = createTestApplication(clock, cfg); - - ExternalQueue ps(*app); - std::map curMap; - app->getCommandHandler().manualCmd("setcursor?id=FOO&cursor=123"); - app->getCommandHandler().manualCmd("setcursor?id=BAR&cursor=456"); - - SECTION("get non-existent cursor") - { - ps.getCursorForResource("NONEXISTENT", curMap); - REQUIRE(curMap.size() == 0); - } - - SECTION("get single cursor") - { - ps.getCursorForResource("FOO", curMap); - REQUIRE(curMap.size() == 1); - } - - SECTION("get all cursors") - { - ps.getCursorForResource("", curMap); - REQUIRE(curMap.size() == 2); - } -} diff --git a/src/overlay/test/FloodTests.cpp b/src/overlay/test/FloodTests.cpp index e224530931..e2f9b50ac1 100644 --- a/src/overlay/test/FloodTests.cpp +++ b/src/overlay/test/FloodTests.cpp @@ -73,9 +73,8 @@ TEST_CASE("Flooding", "[flood][overlay][acceptance]") auto const& header = n->getLedgerManager() .getLastClosedLedgerHeader() .header; - BucketTestUtils::addBatchAndUpdateSnapshot( - n->getBucketManager().getBucketList(), *n, header, {}, - {gen}, {}); + BucketTestUtils::addLiveBatchAndUpdateSnapshot( + *n, header, {}, {gen}, {}); } } } diff --git a/src/overlay/test/OverlayTests.cpp b/src/overlay/test/OverlayTests.cpp index 7d29ae7c66..ff4cf25554 100644 --- a/src/overlay/test/OverlayTests.cpp +++ b/src/overlay/test/OverlayTests.cpp @@ -140,8 +140,8 @@ TEST_CASE("flow control byte capacity", "[overlay][flowcontrol]") { VirtualClock clock; - auto cfg1 = getTestConfig(0, 
Config::TESTDB_IN_MEMORY_NO_OFFERS); - auto cfg2 = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg1 = getTestConfig(0, Config::TESTDB_IN_MEMORY); + auto cfg2 = getTestConfig(1, Config::TESTDB_IN_MEMORY); REQUIRE(cfg1.PEER_FLOOD_READING_CAPACITY != cfg1.PEER_FLOOD_READING_CAPACITY_BYTES); diff --git a/src/simulation/CoreTests.cpp b/src/simulation/CoreTests.cpp index 30f1bffd55..423b77e211 100644 --- a/src/simulation/CoreTests.cpp +++ b/src/simulation/CoreTests.cpp @@ -686,9 +686,8 @@ TEST_CASE("Bucket list entries vs write throughput", "[scalability][!hide]") LedgerHeader lh; lh.ledgerVersion = Config::CURRENT_LEDGER_PROTOCOL_VERSION; lh.ledgerSeq = i; - BucketTestUtils::addBatchAndUpdateSnapshot( - app->getBucketManager().getBucketList(), *app, lh, - LedgerTestUtils::generateValidLedgerEntries(100), + BucketTestUtils::addLiveBatchAndUpdateSnapshot( + *app, lh, LedgerTestUtils::generateValidLedgerEntries(100), LedgerTestUtils::generateValidLedgerEntries(20), LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions( {CONFIG_SETTING}, 5)); diff --git a/src/simulation/Simulation.cpp b/src/simulation/Simulation.cpp index 0818922f6a..61413b94ed 100644 --- a/src/simulation/Simulation.cpp +++ b/src/simulation/Simulation.cpp @@ -91,8 +91,7 @@ Simulation::setCurrentVirtualTime(VirtualClock::system_time_point t) Application::pointer Simulation::addNode(SecretKey nodeKey, SCPQuorumSet qSet, Config const* cfg2, - bool newDB, uint32_t startAtLedger, - std::string const& startAtHash) + bool newDB) { auto cfg = cfg2 ? std::make_shared(*cfg2) : std::make_shared(newConfig()); @@ -140,7 +139,7 @@ Simulation::addNode(SecretKey nodeKey, SCPQuorumSet qSet, Config const* cfg2, } else { - app = setupApp(*cfg, *clock, startAtLedger, startAtHash); + app = setupApp(*cfg, *clock); } mNodes.emplace(nodeKey.getPublicKey(), Node{clock, app}); diff --git a/src/simulation/Simulation.h b/src/simulation/Simulation.h index 8743af37f2..e1385f374d 100644 --- a/src/simulation/Simulation.h +++ b/src/simulation/Simulation.h @@ -50,9 +50,8 @@ class Simulation // Add new node to the simulation. This function does not start the node. // Callers are expected to call `start` or `startAllNodes` manually. 
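    // A minimal usage sketch (illustrative only; the key and single-node
    // quorum are placeholders following the pattern of the tests above):
    //
    //   SecretKey key = SecretKey::random();
    //   SCPQuorumSet qSet;
    //   qSet.threshold = 1;
    //   qSet.validators.push_back(key.getPublicKey());
    //   auto node = simulation->addNode(key, qSet);
    //   simulation->startAllNodes();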
    Application::pointer addNode(SecretKey nodeKey, SCPQuorumSet qSet,
-                                 Config const* cfg = nullptr, bool newDB = true,
-                                 uint32_t startAtLedger = 0,
-                                 std::string const& startAtHash = "");
+                                 Config const* cfg = nullptr,
+                                 bool newDB = true);
     Application::pointer getNode(NodeID nodeID);
     std::vector<Application::pointer> getNodes();
     std::vector<NodeID> getNodeIDs();
diff --git a/src/simulation/test/LoadGeneratorTests.cpp b/src/simulation/test/LoadGeneratorTests.cpp
index fd31de9b05..c2b6a6bf85 100644
--- a/src/simulation/test/LoadGeneratorTests.cpp
+++ b/src/simulation/test/LoadGeneratorTests.cpp
@@ -24,7 +24,6 @@ TEST_CASE("generate load in protocol 1")
         auto cfg = getTestConfig(i);
         cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 5000;
         cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = 1;
-        cfg.DEPRECATED_SQL_LEDGER_STATE = false;
         return cfg;
     });
diff --git a/src/test/FuzzerImpl.cpp b/src/test/FuzzerImpl.cpp
index ea19953621..06f24a5844 100644
--- a/src/test/FuzzerImpl.cpp
+++ b/src/test/FuzzerImpl.cpp
@@ -864,7 +864,6 @@ getFuzzConfig(int instanceNumber)
     Config cfg = getTestConfig(instanceNumber);
     cfg.MANUAL_CLOSE = true;
     cfg.CATCHUP_COMPLETE = false;
-    cfg.BACKGROUND_EVICTION_SCAN = false;
     cfg.CATCHUP_RECENT = 0;
     cfg.ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING = false;
     cfg.ARTIFICIALLY_SET_CLOSE_TIME_FOR_TESTING = UINT32_MAX;
diff --git a/src/test/TestUtils.cpp b/src/test/TestUtils.cpp
index 7750fe345a..c4fb9886fb 100644
--- a/src/test/TestUtils.cpp
+++ b/src/test/TestUtils.cpp
@@ -3,6 +3,7 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "TestUtils.h"
+#include "bucket/BucketList.h"
 #include "overlay/test/LoopbackPeer.h"
 #include "simulation/LoadGenerator.h"
 #include "simulation/Simulation.h"
@@ -128,16 +129,21 @@ computeMultiplier(LedgerEntry const& le)
     }
 }
 
-BucketListDepthModifier::BucketListDepthModifier(uint32_t newDepth)
-    : mPrevDepth(BucketList::kNumLevels)
+template <class BucketT>
+BucketListDepthModifier<BucketT>::BucketListDepthModifier(uint32_t newDepth)
    : mPrevDepth(BucketListBase<BucketT>::kNumLevels)
 {
-    BucketList::kNumLevels = newDepth;
+    BucketListBase<BucketT>::kNumLevels = newDepth;
 }
 
-BucketListDepthModifier::~BucketListDepthModifier()
+template <class BucketT>
+BucketListDepthModifier<BucketT>::~BucketListDepthModifier()
 {
-    BucketList::kNumLevels = mPrevDepth;
+    BucketListBase<BucketT>::kNumLevels = mPrevDepth;
 }
+
+template class BucketListDepthModifier<LiveBucket>;
+template class BucketListDepthModifier<HotArchiveBucket>;
 }
 
 TestInvariantManager::TestInvariantManager(medida::MetricsRegistry& registry)
@@ -285,7 +291,7 @@ modifySorobanNetworkConfig(Application& app,
 
     // Need to close a ledger following call to `addBatch` from config upgrade
     // to refresh cached state
-    if (app.getConfig().isUsingBucketListDB())
+    if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
     {
         txtest::closeLedger(app);
     }
diff --git a/src/test/TestUtils.h b/src/test/TestUtils.h
index 83f3e6d4f9..21b3882e6c 100644
--- a/src/test/TestUtils.h
+++ b/src/test/TestUtils.h
@@ -32,8 +32,11 @@ std::vector<Asset> getInvalidAssets(SecretKey const& issuer);
 
 int32_t computeMultiplier(LedgerEntry const& le);
 
-class BucketListDepthModifier
+template <class BucketT> class BucketListDepthModifier
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
     uint32_t const mPrevDepth;
 
   public:
diff --git a/src/test/TxTests.cpp b/src/test/TxTests.cpp
index b306e737c1..d5b0cec167 100644
--- a/src/test/TxTests.cpp
+++ b/src/test/TxTests.cpp
@@ -387,9 +387,13 @@ checkTransaction(TransactionTestFrame& txFrame, Application& app)
 void
 applyTx(TransactionTestFramePtr const& tx, Application& app, bool checkSeqNum)
 {
+    if (app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
+    {
+
applyCheck(tx, app, checkSeqNum); + } // We cannot commit directly to the DB if running BucketListDB, so close a // ledger with the TX instead - if (app.getConfig().isUsingBucketListDB()) + else { auto resultSet = closeLedger(app, {tx}); @@ -404,10 +408,6 @@ applyTx(TransactionTestFramePtr const& tx, Application& app, bool checkSeqNum) REQUIRE(meta.size() == 1); recordOrCheckGlobalTestTxMetadata(meta.back().getXDR()); } - else - { - applyCheck(tx, app, checkSeqNum); - } throwIf(tx->getResult()); checkTransaction(*tx, app); @@ -1675,7 +1675,8 @@ sorobanEnvelopeFromOps(Hash const& networkID, TestAccount& source, SorobanResources const& resources, uint32_t totalFee, int64_t resourceFee, std::optional memo, std::optional seq, - std::optional muxedData) + std::optional muxedData, + std::optional> proofs) { TransactionEnvelope tx(ENVELOPE_TYPE_TX); if (muxedData) @@ -1694,6 +1695,15 @@ sorobanEnvelopeFromOps(Hash const& networkID, TestAccount& source, tx.v1().tx.ext.v(1); tx.v1().tx.ext.sorobanData().resources = resources; tx.v1().tx.ext.sorobanData().resourceFee = resourceFee; + +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + tx.v1().tx.ext.sorobanData().ext.v(1); + if (proofs) + { + tx.v1().tx.ext.sorobanData().ext.proofs() = *proofs; + } +#endif + if (memo) { Memo textMemo(MEMO_TEXT); @@ -1723,13 +1733,13 @@ transactionFrameFromOps(Hash const& networkID, TestAccount& source, } TransactionTestFramePtr -sorobanTransactionFrameFromOps(Hash const& networkID, TestAccount& source, - std::vector const& ops, - std::vector const& opKeys, - SorobanResources const& resources, - uint32_t inclusionFee, int64_t resourceFee, - std::optional memo, - std::optional seq) +sorobanTransactionFrameFromOps( + Hash const& networkID, TestAccount& source, + std::vector const& ops, std::vector const& opKeys, + SorobanResources const& resources, uint32_t inclusionFee, + int64_t resourceFee, std::optional memo, + std::optional seq, + std::optional> proofs) { uint64 totalFee = inclusionFee; totalFee += resourceFee; @@ -1738,7 +1748,7 @@ sorobanTransactionFrameFromOps(Hash const& networkID, TestAccount& source, networkID, sorobanEnvelopeFromOps(networkID, source, ops, opKeys, resources, static_cast(totalFee), resourceFee, memo, - seq, std::nullopt)); + seq, std::nullopt, proofs)); return TransactionTestFrame::fromTxFrame(tx); } @@ -1750,9 +1760,10 @@ sorobanTransactionFrameFromOpsWithTotalFee( std::optional memo, std::optional muxedData) { auto tx = TransactionFrameBase::makeTransactionFromWire( - networkID, sorobanEnvelopeFromOps(networkID, source, ops, opKeys, - resources, totalFee, resourceFee, - memo, std::nullopt, muxedData)); + networkID, + sorobanEnvelopeFromOps(networkID, source, ops, opKeys, resources, + totalFee, resourceFee, memo, std::nullopt, + muxedData, std::nullopt)); return TransactionTestFrame::fromTxFrame(tx); } diff --git a/src/test/TxTests.h b/src/test/TxTests.h index 1eadc32092..055aecafdd 100644 --- a/src/test/TxTests.h +++ b/src/test/TxTests.h @@ -302,7 +302,8 @@ TransactionTestFramePtr sorobanTransactionFrameFromOps( std::vector const& ops, std::vector const& opKeys, SorobanResources const& resources, uint32_t inclusionFee, int64_t resourceFee, std::optional memo = std::nullopt, - std::optional seq = std::nullopt); + std::optional seq = std::nullopt, + std::optional> proofs = std::nullopt); TransactionTestFramePtr sorobanTransactionFrameFromOpsWithTotalFee( Hash const& networkID, TestAccount& source, std::vector const& ops, std::vector const& opKeys, diff --git a/src/test/test.cpp 
b/src/test/test.cpp index 1227314e78..197e49fe50 100644 --- a/src/test/test.cpp +++ b/src/test/test.cpp @@ -194,10 +194,10 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode) instanceNumber += gBaseInstance; if (mode == Config::TESTDB_DEFAULT) { - // by default, tests should be run with in memory SQLITE as it's faster - // you can change this by enabling the appropriate line below - // mode = Config::TESTDB_IN_MEMORY_OFFERS; - // mode = Config::TESTDB_ON_DISK_SQLITE; + // by default, tests should be run with volatile BucketList as it's + // faster. You can change this by enabling the appropriate line below + // mode = Config::TESTDB_IN_MEMORY; + // mode = Config::TESTDB_BUCKET_DB_PERSISTENT; // mode = Config::TESTDB_POSTGRESQL; mode = Config::TESTDB_BUCKET_DB_VOLATILE; } @@ -283,11 +283,10 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode) switch (mode) { case Config::TESTDB_BUCKET_DB_VOLATILE: - case Config::TESTDB_IN_MEMORY_OFFERS: + case Config::TESTDB_IN_MEMORY: dbname << "sqlite3://:memory:"; break; case Config::TESTDB_BUCKET_DB_PERSISTENT: - case Config::TESTDB_ON_DISK_SQLITE: dbname << "sqlite3://" << rootDir << "test.db"; thisConfig.DISABLE_XDR_FSYNC = false; break; @@ -296,30 +295,17 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode) dbname << "postgresql://dbname=test" << instanceNumber; thisConfig.DISABLE_XDR_FSYNC = false; break; - case Config::TESTDB_IN_MEMORY_NO_OFFERS: - thisConfig.MODE_USES_IN_MEMORY_LEDGER = true; - break; #endif default: abort(); } - if (mode == Config::TESTDB_BUCKET_DB_VOLATILE || - mode == Config::TESTDB_BUCKET_DB_PERSISTENT) - { - thisConfig.DEPRECATED_SQL_LEDGER_STATE = false; - thisConfig.BACKGROUND_EVICTION_SCAN = true; - } - else + if (mode == Config::TESTDB_IN_MEMORY) { - thisConfig.DEPRECATED_SQL_LEDGER_STATE = true; - thisConfig.BACKGROUND_EVICTION_SCAN = false; + thisConfig.MODE_USES_IN_MEMORY_LEDGER = true; } - if (mode != Config::TESTDB_IN_MEMORY_NO_OFFERS) - { - thisConfig.DATABASE = SecretValue{dbname.str()}; - } + thisConfig.DATABASE = SecretValue{dbname.str()}; thisConfig.REPORT_METRICS = gTestMetrics; // disable maintenance @@ -516,6 +502,13 @@ for_versions_from(std::vector const& versions, Application& app, for_versions_from(versions.back() + 1, app, f); } +void +for_versions_from(uint32 from, Config const& cfg, + std::function const& f) +{ + for_versions(from, Config::CURRENT_LEDGER_PROTOCOL_VERSION, cfg, f); +} + void for_all_versions(Application& app, std::function const& f) { @@ -558,6 +551,21 @@ for_versions(uint32 from, uint32 to, Config const& cfg, for_versions(versions, cfg, f); } +void +for_versions(uint32 from, uint32 to, Config const& cfg, + std::function const& f) +{ + if (from > to) + { + return; + } + auto versions = std::vector{}; + versions.resize(to - from + 1); + std::iota(std::begin(versions), std::end(versions), from); + + for_versions(versions, cfg, f); +} + void for_versions(std::vector const& versions, Application& app, std::function const& f) @@ -592,6 +600,22 @@ for_versions(std::vector const& versions, Config const& cfg, } } +void +for_versions(std::vector const& versions, Config const& cfg, + std::function const& f) +{ + REQUIRE(gMustUseTestVersionsWrapper); + + if (std::find(versions.begin(), versions.end(), gTestingVersion) != + versions.end()) + { + REQUIRE(cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION == gTestingVersion); + Config vcfg = cfg; + vcfg.LEDGER_PROTOCOL_VERSION = gTestingVersion; + f(vcfg); + } +} + void for_all_versions_except(std::vector const& versions, 
Application& app, std::function const& f) diff --git a/src/test/test.h b/src/test/test.h index ad41a1f5f1..a026102a77 100644 --- a/src/test/test.h +++ b/src/test/test.h @@ -55,6 +55,9 @@ void for_versions_from(uint32 from, Application& app, void for_versions_from(std::vector const& versions, Application& app, std::function const& f); +void for_versions_from(uint32 from, Config const& cfg, + std::function const& f); + void for_all_versions(Application& app, std::function const& f); void for_all_versions(Config const& cfg, @@ -69,9 +72,15 @@ void for_versions(std::vector const& versions, Application& app, void for_versions(uint32 from, uint32 to, Config const& cfg, std::function const& f); +void for_versions(uint32 from, uint32 to, Config const& cfg, + std::function const& f); + void for_versions(std::vector const& versions, Config const& cfg, std::function const& f); +void for_versions(std::vector const& versions, Config const& cfg, + std::function const& f); + void for_all_versions_except(std::vector const& versions, Application& app, std::function const& f); diff --git a/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json b/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json index c7cbfc9ada..28129129ee 100644 --- a/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json +++ b/src/testdata/ledger-close-meta-v1-protocol-23-soroban.json @@ -6,27 +6,27 @@ "v": 0 }, "ledgerHeader": { - "hash": "85a08b356fe8dd72e5ff999b7187f411103df3617c2b7085dbbca99587f9651a", + "hash": "0df2ae22423e7c911902bdd1b387d0c4f4964b7c7af76df0e1f1f3a954d2dcb1", "header": { "ledgerVersion": 23, - "previousLedgerHash": "2b4f5fcff7bd087d4b6e7c6ded785c5ff466b3e0d6f304da8295c9a37197ed96", + "previousLedgerHash": "ec831e454c9b5abe3a8f86c6722e1c1941e8b5669702ce73960566ce0703a3c2", "scpValue": { - "txSetHash": "24740b1ffa1584a68e5bed13068340cc8999577c9ab2eea2173c470d4a8a72d1", + "txSetHash": "dd507a3251ca286285959aee6156152f15b52e5fc46742a047e9c93d39dbb27e", "closeTime": 1451692800, "upgrades": [], "ext": { "v": "STELLAR_VALUE_SIGNED", "lcValueSignature": { "nodeID": "GDDOUW25MRFLNXQMN3OODP6JQEXSGLMHAFZV4XPQ2D3GA4QFIDMEJG2O", - "signature": "497d04432640e9de6ce91feb3723ea87d20a2c34ad9d6c1fe142e6a30e415213cde0ca12a4c514c4660f38a5c9b7151da5562da4c563bca8e7e0abbf39a31909" + "signature": "0587c6e47b6ec9a9d00833af90202a130842e866e25b6436730994a65ce73a8f83c402bccbd1c9a364d08d2d76c4db1a1074752b7ce41177939de31972a63200" } } }, - "txSetResultHash": "65b6fe91abfe43ed98fa2163f08fdf3f2f3231101bba05102521186c25a1cc4b", - "bucketListHash": "8c1bdb45de59b67cfa87badc1b522439bea0cda2e6d453eb67212247ad441fd1", + "txSetResultHash": "6bab0ddd0b6deec3328aa98685f78f0ac172c87d1bb3365344209ca9e59c2f87", + "bucketListHash": "dde7b6e6bf0d7228ce944e382e54cb48a7a4d0941e37f7b4e0d149f81c84bcb2", "ledgerSeq": 28, "totalCoins": 1000000000000000000, - "feePool": 804520, + "feePool": 804593, "inflationSeq": 0, "idPool": 0, "baseFee": 100, @@ -49,7 +49,7 @@ "txSet": { "v": 1, "v1TxSet": { - "previousLedgerHash": "2b4f5fcff7bd087d4b6e7c6ded785c5ff466b3e0d6f304da8295c9a37197ed96", + "previousLedgerHash": "ec831e454c9b5abe3a8f86c6722e1c1941e8b5669702ce73960566ce0703a3c2", "phases": [ { "v": 0, @@ -67,9 +67,9 @@ "type": "ENVELOPE_TYPE_TX", "v1": { "tx": { - "sourceAccount": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "fee": 1044855, - "seqNum": 51539607553, + "sourceAccount": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "fee": 102519, + "seqNum": 47244640257, "cond": { "type": "PRECOND_NONE" }, @@ -83,48 +83,23 
@@ "type": "INVOKE_HOST_FUNCTION", "invokeHostFunctionOp": { "hostFunction": { - "type": "HOST_FUNCTION_TYPE_CREATE_CONTRACT", - "createContract": { - "contractIDPreimage": { - "type": "CONTRACT_ID_PREIMAGE_FROM_ADDRESS", - "fromAddress": { - "address": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "salt": "63479ad69a090b258277ec8fba6f99419a2ffb248981510657c944ccd1148e97" + "type": "HOST_FUNCTION_TYPE_INVOKE_CONTRACT", + "invokeContract": { + "contractAddress": "CAA3QKIP2SNVXUJTB4HKOGF55JTSSMQGED3FZYNHMNSXYV3DRRMAWA3Y", + "functionName": "put_persistent", + "args": [ + { + "type": "SCV_SYMBOL", + "sym": "key" + }, + { + "type": "SCV_U64", + "u64": 42 } - }, - "executable": { - "type": "CONTRACT_EXECUTABLE_WASM", - "wasm_hash": "fc644715caaead746e6145f4331ff75c427c965c20d2995a9942b01247515962" - } + ] } }, - "auth": [ - { - "credentials": { - "type": "SOROBAN_CREDENTIALS_SOURCE_ACCOUNT" - }, - "rootInvocation": { - "function": { - "type": "SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_V2_HOST_FN", - "createContractV2HostFn": { - "contractIDPreimage": { - "type": "CONTRACT_ID_PREIMAGE_FROM_ADDRESS", - "fromAddress": { - "address": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "salt": "63479ad69a090b258277ec8fba6f99419a2ffb248981510657c944ccd1148e97" - } - }, - "executable": { - "type": "CONTRACT_EXECUTABLE_WASM", - "wasm_hash": "fc644715caaead746e6145f4331ff75c427c965c20d2995a9942b01247515962" - }, - "constructorArgs": [] - } - }, - "subInvocations": [] - } - } - ] + "auth": [] } } } @@ -133,7 +108,8 @@ "v": 1, "sorobanData": { "ext": { - "v": 0 + "v": 1, + "proofs": [] }, "resources": { "footprint": { @@ -143,33 +119,32 @@ "contractCode": { "hash": "fc644715caaead746e6145f4331ff75c427c965c20d2995a9942b01247515962" } - } - ], - "readWrite": [ + }, { "type": "CONTRACT_DATA", "contractData": { - "contract": "CCOKSYPZJ2B3244CEMLBGUGWPMQ3BLES6AKHRQCX2XF27K4HDBW2LKDF", + "contract": "CAA3QKIP2SNVXUJTB4HKOGF55JTSSMQGED3FZYNHMNSXYV3DRRMAWA3Y", "key": { "type": "SCV_LEDGER_KEY_CONTRACT_INSTANCE" }, "durability": "PERSISTENT" } } - ] + ], + "readWrite": [] }, - "instructions": 200000, - "readBytes": 5000, - "writeBytes": 5000 + "instructions": 4000000, + "readBytes": 10000, + "writeBytes": 1000 }, - "resourceFee": 1043855 + "resourceFee": 101519 } } }, "signatures": [ { - "hint": "4b80097b", - "signature": "645f799c0de7c65cbb64e5fcf7c3aad3229793eb9898267f73b4ad4460482174503926c936c8be9242d35b8a5ac83c243716e0c070f53fa2c051d8aabc50750e" + "hint": "477df904", + "signature": "80ea1e6cb3dec17a01afc91c2956311efdfe7d769f812176e7a8260af10668d052271c6e50a25574853ec3730d501a07ae60b63378ff3c92fc8b6eda4305c604" } ] } @@ -178,9 +153,9 @@ "type": "ENVELOPE_TYPE_TX", "v1": { "tx": { - "sourceAccount": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "fee": 102512, - "seqNum": 47244640257, + "sourceAccount": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "fee": 1001000, + "seqNum": 34359738369, "cond": { "type": "PRECOND_NONE" }, @@ -191,26 +166,11 @@ { "sourceAccount": null, "body": { - "type": "INVOKE_HOST_FUNCTION", - "invokeHostFunctionOp": { - "hostFunction": { - "type": "HOST_FUNCTION_TYPE_INVOKE_CONTRACT", - "invokeContract": { - "contractAddress": "CAA3QKIP2SNVXUJTB4HKOGF55JTSSMQGED3FZYNHMNSXYV3DRRMAWA3Y", - "functionName": "put_persistent", - "args": [ - { - "type": "SCV_SYMBOL", - "sym": "key" - }, - { - "type": "SCV_U64", - "u64": 42 - } - ] - } - }, - "auth": [] + "type": "RESTORE_FOOTPRINT", + "restoreFootprintOp": { + "ext": { + "v": 0 + } 
} } } @@ -219,42 +179,38 @@ "v": 1, "sorobanData": { "ext": { - "v": 0 + "v": 1, + "proofs": [] }, "resources": { "footprint": { - "readOnly": [ - { - "type": "CONTRACT_CODE", - "contractCode": { - "hash": "fc644715caaead746e6145f4331ff75c427c965c20d2995a9942b01247515962" - } - }, + "readOnly": [], + "readWrite": [ { "type": "CONTRACT_DATA", "contractData": { "contract": "CAA3QKIP2SNVXUJTB4HKOGF55JTSSMQGED3FZYNHMNSXYV3DRRMAWA3Y", "key": { - "type": "SCV_LEDGER_KEY_CONTRACT_INSTANCE" + "type": "SCV_SYMBOL", + "sym": "archived" }, "durability": "PERSISTENT" } } - ], - "readWrite": [] + ] }, - "instructions": 4000000, - "readBytes": 10000, + "instructions": 0, + "readBytes": 5000, "writeBytes": 1000 }, - "resourceFee": 101512 + "resourceFee": 1000000 } } }, "signatures": [ { - "hint": "477df904", - "signature": "7d9bf0729a1ead0eb7cebaa9012179794fd3c57db24ecb9edcd9505c53c7ca88752131f301aabbde785b5cb4797b2deb11af7eead94b21c36878f0c6c3e81e08" + "hint": "5099a12e", + "signature": "0efeb8e4a0bdad082f3302aff83c577ab032011612b9196f3c7214da098f8b13e81a26c6cf59b83340877618f174196feb49c0e40cbb63d712e2a26814b3f505" } ] } @@ -264,7 +220,7 @@ "v1": { "tx": { "sourceAccount": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "fee": 127626, + "fee": 127635, "seqNum": 42949672961, "cond": { "type": "PRECOND_NONE" @@ -304,7 +260,8 @@ "v": 1, "sorobanData": { "ext": { - "v": 0 + "v": 1, + "proofs": [] }, "resources": { "footprint": { @@ -344,14 +301,14 @@ "readBytes": 10000, "writeBytes": 1000 }, - "resourceFee": 126626 + "resourceFee": 126635 } } }, "signatures": [ { "hint": "e189b409", - "signature": "7f485f944582388fb4034d9737a8b08940e371b8cac04d0795a876db6476c0969be688184cd86388b989cc73785f6ff8165d1bf293567ff114d9e242481abf0e" + "signature": "d17662536a496eb7867b56531ff6969fd2d37d05b3cdfb1beac4b7671a99d5b3d51c194ece42842c09d589421f902e692d8d946f88ca9766e7341cced8385509" } ] } @@ -360,9 +317,9 @@ "type": "ENVELOPE_TYPE_TX", "v1": { "tx": { - "sourceAccount": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "fee": 1001000, - "seqNum": 38654705665, + "sourceAccount": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "fee": 1044855, + "seqNum": 51539607553, "cond": { "type": "PRECOND_NONE" }, @@ -373,12 +330,51 @@ { "sourceAccount": null, "body": { - "type": "EXTEND_FOOTPRINT_TTL", - "extendFootprintTTLOp": { - "ext": { - "v": 0 + "type": "INVOKE_HOST_FUNCTION", + "invokeHostFunctionOp": { + "hostFunction": { + "type": "HOST_FUNCTION_TYPE_CREATE_CONTRACT", + "createContract": { + "contractIDPreimage": { + "type": "CONTRACT_ID_PREIMAGE_FROM_ADDRESS", + "fromAddress": { + "address": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "salt": "63479ad69a090b258277ec8fba6f99419a2ffb248981510657c944ccd1148e97" + } + }, + "executable": { + "type": "CONTRACT_EXECUTABLE_WASM", + "wasm_hash": "fc644715caaead746e6145f4331ff75c427c965c20d2995a9942b01247515962" + } + } }, - "extendTo": 10000 + "auth": [ + { + "credentials": { + "type": "SOROBAN_CREDENTIALS_SOURCE_ACCOUNT" + }, + "rootInvocation": { + "function": { + "type": "SOROBAN_AUTHORIZED_FUNCTION_TYPE_CREATE_CONTRACT_V2_HOST_FN", + "createContractV2HostFn": { + "contractIDPreimage": { + "type": "CONTRACT_ID_PREIMAGE_FROM_ADDRESS", + "fromAddress": { + "address": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "salt": "63479ad69a090b258277ec8fba6f99419a2ffb248981510657c944ccd1148e97" + } + }, + "executable": { + "type": "CONTRACT_EXECUTABLE_WASM", + "wasm_hash": 
"fc644715caaead746e6145f4331ff75c427c965c20d2995a9942b01247515962" + }, + "constructorArgs": [] + } + }, + "subInvocations": [] + } + } + ] } } } @@ -387,7 +383,8 @@ "v": 1, "sorobanData": { "ext": { - "v": 0 + "v": 1, + "proofs": [] }, "resources": { "footprint": { @@ -397,32 +394,33 @@ "contractCode": { "hash": "fc644715caaead746e6145f4331ff75c427c965c20d2995a9942b01247515962" } - }, + } + ], + "readWrite": [ { "type": "CONTRACT_DATA", "contractData": { - "contract": "CAA3QKIP2SNVXUJTB4HKOGF55JTSSMQGED3FZYNHMNSXYV3DRRMAWA3Y", + "contract": "CCOKSYPZJ2B3244CEMLBGUGWPMQ3BLES6AKHRQCX2XF27K4HDBW2LKDF", "key": { "type": "SCV_LEDGER_KEY_CONTRACT_INSTANCE" }, "durability": "PERSISTENT" } } - ], - "readWrite": [] + ] }, - "instructions": 0, - "readBytes": 10000, - "writeBytes": 0 + "instructions": 200000, + "readBytes": 5000, + "writeBytes": 5000 }, - "resourceFee": 1000000 + "resourceFee": 1043855 } } }, "signatures": [ { - "hint": "f7f60229", - "signature": "eacb5cc7b4df9debd1b1de1f263e26fc85779ffed4ce1e226310ea4b5120aa8661cd253e637277fdad203b773cd1a16a5f9c274b17a2897850a538f94ee9cd05" + "hint": "4b80097b", + "signature": "abb9c642a03090d805e173c6658960733e5b840144f557da4359b0ede36cbaed60da2a21a1c51b80d3d39c98ec372f8ed25f0665bde674116b941c9029cd3202" } ] } @@ -431,9 +429,9 @@ "type": "ENVELOPE_TYPE_TX", "v1": { "tx": { - "sourceAccount": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "sourceAccount": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", "fee": 1001000, - "seqNum": 34359738369, + "seqNum": 38654705665, "cond": { "type": "PRECOND_NONE" }, @@ -444,11 +442,12 @@ { "sourceAccount": null, "body": { - "type": "RESTORE_FOOTPRINT", - "restoreFootprintOp": { + "type": "EXTEND_FOOTPRINT_TTL", + "extendFootprintTTLOp": { "ext": { "v": 0 - } + }, + "extendTo": 10000 } } } @@ -457,28 +456,34 @@ "v": 1, "sorobanData": { "ext": { - "v": 0 + "v": 1, + "proofs": [] }, "resources": { "footprint": { - "readOnly": [], - "readWrite": [ + "readOnly": [ + { + "type": "CONTRACT_CODE", + "contractCode": { + "hash": "fc644715caaead746e6145f4331ff75c427c965c20d2995a9942b01247515962" + } + }, { "type": "CONTRACT_DATA", "contractData": { "contract": "CAA3QKIP2SNVXUJTB4HKOGF55JTSSMQGED3FZYNHMNSXYV3DRRMAWA3Y", "key": { - "type": "SCV_SYMBOL", - "sym": "archived" + "type": "SCV_LEDGER_KEY_CONTRACT_INSTANCE" }, "durability": "PERSISTENT" } } - ] + ], + "readWrite": [] }, "instructions": 0, - "readBytes": 5000, - "writeBytes": 1000 + "readBytes": 10000, + "writeBytes": 0 }, "resourceFee": 1000000 } @@ -486,8 +491,8 @@ }, "signatures": [ { - "hint": "5099a12e", - "signature": "62ab56c91919471a63bed4ca3bdbed1d7066ad4babfdfa2ffe8d023d559334651889fac6f8e8caca6a2cc23c9fe4a6d0f2a64cd0fe61e35e6286d5264d8e9108" + "hint": "f7f60229", + "signature": "2556a34f4d4d9a04d9395bd1f0e98b5c8e6884821523ad1d20958a709bc85a53ece5b0d78ee0a09810917d7282200323da0be71ce557e1110d91005954ea3b0f" } ] } @@ -503,18 +508,18 @@ "txProcessing": [ { "result": { - "transactionHash": "62d28c373389d447341e9d75bc84e2c91437169a2a70d3606c8b3aa7d198ef5c", + "transactionHash": "7a4e118eada547afae378dde799a5cb300438dbc4b42a7b51c545edbf56f4c3e", "result": { - "feeCharged": 42954, + "feeCharged": 60566, "result": { - "code": "txFAILED", + "code": "txSUCCESS", "results": [ { "code": "opINNER", "tr": { - "type": "INVOKE_HOST_FUNCTION", - "invokeHostFunctionResult": { - "code": "INVOKE_HOST_FUNCTION_RESOURCE_LIMIT_EXCEEDED" + "type": "EXTEND_FOOTPRINT_TTL", + "extendFootprintTTLResult": { + "code": "EXTEND_FOOTPRINT_TTL_SUCCESS" } } } @@ 
-529,13 +534,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 12, + "lastModifiedLedgerSeq": 9, "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", "balance": 400000000, - "seqNum": 51539607552, + "seqNum": 38654705664, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -559,9 +564,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607552, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 398999900, + "seqNum": 38654705664, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -593,9 +598,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607552, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 398999900, + "seqNum": 38654705664, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -619,9 +624,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607553, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 398999900, + "seqNum": 38654705665, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -663,7 +668,76 @@ } } ], - "operations": [], + "operations": [ + { + "changes": [ + { + "type": "LEDGER_ENTRY_STATE", + "state": { + "lastModifiedLedgerSeq": 6, + "data": { + "type": "TTL", + "ttl": { + "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", + "liveUntilLedgerSeq": 10006 + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_UPDATED", + "updated": { + "lastModifiedLedgerSeq": 28, + "data": { + "type": "TTL", + "ttl": { + "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", + "liveUntilLedgerSeq": 10028 + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_STATE", + "state": { + "lastModifiedLedgerSeq": 6, + "data": { + "type": "TTL", + "ttl": { + "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", + "liveUntilLedgerSeq": 10006 + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_UPDATED", + "updated": { + "lastModifiedLedgerSeq": 28, + "data": { + "type": "TTL", + "ttl": { + "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", + "liveUntilLedgerSeq": 10028 + } + }, + "ext": { + "v": 0 + } + } + } + ] + } + ], "txChangesAfter": [ { "type": "LEDGER_ENTRY_STATE", @@ -672,9 +746,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 398956045, - "seqNum": 51539607553, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 398999900, + "seqNum": 38654705665, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -722,9 +796,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", - "balance": 399957046, - "seqNum": 51539607553, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399939434, + "seqNum": 38654705665, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -782,9 +856,9 @@ }, { "result": { - 
"transactionHash": "bb0a6b13caea6b015555dfd332aca1099e8654896bf7d1bcce8432e833a2572a", + "transactionHash": "16e22c43e107e363ba604b2b458eb213b70c0a3df283042ec1722cdc351118e3", "result": { - "feeCharged": 61612, + "feeCharged": 42963, "result": { "code": "txFAILED", "results": [ @@ -793,7 +867,7 @@ "tr": { "type": "INVOKE_HOST_FUNCTION", "invokeHostFunctionResult": { - "code": "INVOKE_HOST_FUNCTION_TRAPPED" + "code": "INVOKE_HOST_FUNCTION_RESOURCE_LIMIT_EXCEEDED" } } } @@ -808,13 +882,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 11, + "lastModifiedLedgerSeq": 12, "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", "balance": 400000000, - "seqNum": 47244640256, + "seqNum": 51539607552, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -838,9 +912,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - "seqNum": 47244640256, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607552, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -872,9 +946,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - "seqNum": 47244640256, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607552, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -898,9 +972,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - "seqNum": 47244640257, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607553, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -951,9 +1025,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399898388, - "seqNum": 47244640257, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 398956045, + "seqNum": 51539607553, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1001,9 +1075,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", - "balance": 399938388, - "seqNum": 47244640257, + "accountID": "GA2NXNEE2MHWGQP5XXACPYG2BDZFPKGYPFNST5V3ZZN75NSLQAEXX7CU", + "balance": 399957037, + "seqNum": 51539607553, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1061,9 +1135,9 @@ }, { "result": { - "transactionHash": "e310227a8c0d8d1f78632e65ebca281cd60d8619c9afc64491bcce98e7cd7ee3", + "transactionHash": "ccf577b22534829f757fc83a2d76bdaeea44e3d79a48b72067cf8e3ac3e9357b", "result": { - "feeCharged": 106775, + "feeCharged": 106784, "result": { "code": "txSUCCESS", "results": [ @@ -1119,7 +1193,7 @@ "type": "ACCOUNT", "account": { "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, + "balance": 399873265, "seqNum": 42949672960, "numSubEntries": 0, "inflationDest": null, @@ -1153,7 +1227,7 @@ "type": "ACCOUNT", "account": { "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, + "balance": 399873265, "seqNum": 42949672960, "numSubEntries": 0, "inflationDest": null, @@ 
-1179,7 +1253,7 @@ "type": "ACCOUNT", "account": { "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, + "balance": 399873265, "seqNum": 42949672961, "numSubEntries": 0, "inflationDest": null, @@ -1280,7 +1354,7 @@ "type": "ACCOUNT", "account": { "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399873274, + "balance": 399873265, "seqNum": 42949672961, "numSubEntries": 0, "inflationDest": null, @@ -1330,7 +1404,7 @@ "type": "ACCOUNT", "account": { "accountID": "GDWWCIR2FIWTY2D3CEDXZYRTRNTFZCC5PBCGC6XPMKCLUV7BRG2AT3RD", - "balance": 399893225, + "balance": 399893216, "seqNum": 42949672961, "numSubEntries": 0, "inflationDest": null, @@ -1388,18 +1462,18 @@ }, { "result": { - "transactionHash": "364ec41dce0a678476ea3ebfc5caa28165ef3bf0976071d858b1c4044f187d25", + "transactionHash": "826c09db23c8efae9c9a8291c690c32c74782fdacf3c6c7f61d3f6622c35e9fd", "result": { - "feeCharged": 60559, + "feeCharged": 61619, "result": { - "code": "txSUCCESS", + "code": "txFAILED", "results": [ { "code": "opINNER", "tr": { - "type": "EXTEND_FOOTPRINT_TTL", - "extendFootprintTTLResult": { - "code": "EXTEND_FOOTPRINT_TTL_SUCCESS" + "type": "INVOKE_HOST_FUNCTION", + "invokeHostFunctionResult": { + "code": "INVOKE_HOST_FUNCTION_TRAPPED" } } } @@ -1414,13 +1488,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 9, + "lastModifiedLedgerSeq": 11, "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", "balance": 400000000, - "seqNum": 38654705664, + "seqNum": 47244640256, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1444,9 +1518,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705664, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898381, + "seqNum": 47244640256, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1478,9 +1552,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705664, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898381, + "seqNum": 47244640256, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1504,9 +1578,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705665, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898381, + "seqNum": 47244640257, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1548,76 +1622,7 @@ } } ], - "operations": [ - { - "changes": [ - { - "type": "LEDGER_ENTRY_STATE", - "state": { - "lastModifiedLedgerSeq": 6, - "data": { - "type": "TTL", - "ttl": { - "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", - "liveUntilLedgerSeq": 10006 - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_UPDATED", - "updated": { - "lastModifiedLedgerSeq": 28, - "data": { - "type": "TTL", - "ttl": { - "keyHash": "091ddece931776a53f93869b82c24e132cc12d00d961fac09bc3b9cb9021c62d", - "liveUntilLedgerSeq": 10028 - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_STATE", - "state": { - 
"lastModifiedLedgerSeq": 6, - "data": { - "type": "TTL", - "ttl": { - "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", - "liveUntilLedgerSeq": 10006 - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_UPDATED", - "updated": { - "lastModifiedLedgerSeq": 28, - "data": { - "type": "TTL", - "ttl": { - "keyHash": "60313f9b273db0b14c3e503cf6cc152dd14a0c57e5e81a23e86b4e27a23a2c06", - "liveUntilLedgerSeq": 10028 - } - }, - "ext": { - "v": 0 - } - } - } - ] - } - ], + "operations": [], "txChangesAfter": [ { "type": "LEDGER_ENTRY_STATE", @@ -1626,9 +1631,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 398999900, - "seqNum": 38654705665, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399898381, + "seqNum": 47244640257, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1676,9 +1681,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399939441, - "seqNum": 38654705665, + "accountID": "GAM4XNEHJUHN7VE3ZWJI23R2WS3SJS2BTUHZAC6XICXLXO2HPX4QI2IR", + "balance": 399938381, + "seqNum": 47244640257, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -1736,9 +1741,9 @@ }, { "result": { - "transactionHash": "ee68d27257fa137933de22b3fdfbc4a736ec01af29a9e25e5b807252b1a1ca0a", + "transactionHash": "742cd3385bedce54d59e1d9751a34e91beafeb9f6afcbded05f2e016e8b51b2c", "result": { - "feeCharged": 51547, + "feeCharged": 51555, "result": { "code": "txSUCCESS", "results": [ @@ -1993,7 +1998,7 @@ "type": "ACCOUNT", "account": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "balance": 399948453, + "balance": 399948445, "seqNum": 34359738369, "numSubEntries": 0, "inflationDest": null, diff --git a/src/testdata/ledger-close-meta-v1-protocol-23.json b/src/testdata/ledger-close-meta-v1-protocol-23.json index 182097130e..164bfe461b 100644 --- a/src/testdata/ledger-close-meta-v1-protocol-23.json +++ b/src/testdata/ledger-close-meta-v1-protocol-23.json @@ -6,24 +6,24 @@ "v": 0 }, "ledgerHeader": { - "hash": "544367e4a7271f352eb91566e9e748ed03246b8a445bda3c70b1e93e3d751f7b", + "hash": "df0caa7841395c80b0f7b3dec4f13e5058a406fd0cb0959375c6830ea311e32e", "header": { "ledgerVersion": 23, - "previousLedgerHash": "a8fa722c806905e1bf3b3cb7750db78031ad228c2812d2a3479b06db95b0bd43", + "previousLedgerHash": "238186da4a6e457877adec84246cbb50dd054cc81cd913ef97cffb492ff6ac74", "scpValue": { - "txSetHash": "969ab56f99763716d1fc4480bfe27bb5dfaaf1666dd44cdcd6c51d8fc9074d2c", + "txSetHash": "6755cddd3f4b967d42930b3eb84bbd991ccf2f0ddec05fc85f24e77dcd6746d7", "closeTime": 0, "upgrades": [], "ext": { "v": "STELLAR_VALUE_SIGNED", "lcValueSignature": { "nodeID": "GDDOUW25MRFLNXQMN3OODP6JQEXSGLMHAFZV4XPQ2D3GA4QFIDMEJG2O", - "signature": "f6f4db8cc07c1d2f07f95b0b221a8ce55ae214dc51d82a71432352e6b3a4583b5930f643110a18bec0684d884d68d066ef3d0a051019f74648f9ef38d17c5707" + "signature": "2d46696da63265a28b3055ccc9cdb2aa32d1008f4732f130dcda8842235a2419ac5432290892243f636563e42fa9b3829f31b8daef8b1e1142de52a770c3130f" } } }, - "txSetResultHash": "f66233c106977a4cc148e019411ff6ddfaf76c337d004ed9a304a70407b161d0", - "bucketListHash": "e9db6e23db1e6a733b8d285e5ca8eab8c3b6b7d8d1bace90afe02df7ce0dbecb", + "txSetResultHash": "249b974bacf8b5c4a8f0b5598194c1b9eca64af0b5c1506daa871c1533b6baac", + "bucketListHash": 
"5ba9bbd81fb831cf30cf89b221629d376e563373bc6b56e1c44e82adca5e427f", "ledgerSeq": 7, "totalCoins": 1000000000000000000, "feePool": 800, @@ -49,7 +49,7 @@ "txSet": { "v": 1, "v1TxSet": { - "previousLedgerHash": "a8fa722c806905e1bf3b3cb7750db78031ad228c2812d2a3479b06db95b0bd43", + "previousLedgerHash": "238186da4a6e457877adec84246cbb50dd054cc81cd913ef97cffb492ff6ac74", "phases": [ { "v": 0, @@ -185,43 +185,22 @@ "txProcessing": [ { "result": { - "transactionHash": "324d0628e2a215d367f181f0e3aacbaa26fa638e676e73fb9ad26a360314a7b7", + "transactionHash": "0db2322d85e9d8ea2421559922bb6107429650ebdad304c907480853d465c10d", "result": { - "feeCharged": 300, + "feeCharged": 100, "result": { - "code": "txFEE_BUMP_INNER_SUCCESS", - "innerResultPair": { - "transactionHash": "b28c171f9658320b5ce8d50e4e1a36b74afbb2a92eec7df92a8981067131b025", - "result": { - "feeCharged": 200, - "result": { - "code": "txSUCCESS", - "results": [ - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } - } - }, - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } - } - } - ] - }, - "ext": { - "v": 0 + "code": "txSUCCESS", + "results": [ + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": "PAYMENT_SUCCESS" + } } } - } + ] }, "ext": { "v": 0 @@ -232,13 +211,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 4, + "lastModifiedLedgerSeq": 5, "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 400000000, - "seqNum": 17179869184, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989700, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -246,7 +225,31 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } + } + } } } }, @@ -262,9 +265,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399999700, - "seqNum": 17179869184, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -272,7 +275,31 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } + } + } } } }, @@ -296,61 +323,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 399999700, - "seqNum": 17179869184, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], - "ext": { - "v": 0 - } - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_UPDATED", - "updated": { - "lastModifiedLedgerSeq": 7, - "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", - "balance": 
399999700, - "seqNum": 17179869184, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], - "ext": { - "v": 0 - } - } - }, - "ext": { - "v": 0 - } - } - }, - { - "type": "LEDGER_ENTRY_STATE", - "state": { - "lastModifiedLedgerSeq": 5, - "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", - "balance": 200010000, - "seqNum": 21474836480, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 3, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -358,7 +333,31 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 5, + "seqTime": 0 + } + } + } + } + } } } }, @@ -374,9 +373,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", - "balance": 200010000, - "seqNum": 21474836481, + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 4, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -426,18 +425,43 @@ "state": { "lastModifiedLedgerSeq": 6, "data": { - "type": "TRUSTLINE", - "trustLine": { + "type": "ACCOUNT", + "account": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 0, - "limit": 100, - "flags": 1, + "balance": 399999900, + "seqNum": 12884901889, + "numSubEntries": 1, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 6, + "seqTime": 0 + } + } + } + } + } } } }, @@ -451,18 +475,43 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { + "type": "ACCOUNT", + "account": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 50, - "limit": 100, - "flags": 1, + "balance": 400000900, + "seqNum": 12884901889, + "numSubEntries": 1, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 6, + "seqTime": 0 + } + } + } + } + } } } }, @@ -470,28 +519,49 @@ "v": 0 } } - } - ] - }, - { - "changes": [ + }, { "type": "LEDGER_ENTRY_STATE", "state": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 50, - "limit": 100, - "flags": 1, + 
"type": "ACCOUNT", + "account": { + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999989600, + "seqNum": 4, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 7, + "seqTime": 0 + } + } + } + } + } } } }, @@ -505,18 +575,43 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "TRUSTLINE", - "trustLine": { - "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "asset": { - "assetCode": "CUR1", - "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" - }, - "balance": 100, - "limit": 100, - "flags": 1, + "type": "ACCOUNT", + "account": { + "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", + "balance": 999999998999988600, + "seqNum": 4, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 0 + "v": 1, + "v1": { + "liabilities": { + "buying": 0, + "selling": 0 + }, + "ext": { + "v": 2, + "v2": { + "numSponsored": 0, + "numSponsoring": 0, + "signerSponsoringIDs": [], + "ext": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "seqLedger": 7, + "seqTime": 0 + } + } + } + } + } } } }, @@ -535,22 +630,43 @@ }, { "result": { - "transactionHash": "0db2322d85e9d8ea2421559922bb6107429650ebdad304c907480853d465c10d", + "transactionHash": "324d0628e2a215d367f181f0e3aacbaa26fa638e676e73fb9ad26a360314a7b7", "result": { - "feeCharged": 100, + "feeCharged": 300, "result": { - "code": "txSUCCESS", - "results": [ - { - "code": "opINNER", - "tr": { - "type": "PAYMENT", - "paymentResult": { - "code": "PAYMENT_SUCCESS" - } + "code": "txFEE_BUMP_INNER_SUCCESS", + "innerResultPair": { + "transactionHash": "b28c171f9658320b5ce8d50e4e1a36b74afbb2a92eec7df92a8981067131b025", + "result": { + "feeCharged": 200, + "result": { + "code": "txSUCCESS", + "results": [ + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": "PAYMENT_SUCCESS" + } + } + }, + { + "code": "opINNER", + "tr": { + "type": "PAYMENT", + "paymentResult": { + "code": "PAYMENT_SUCCESS" + } + } + } + ] + }, + "ext": { + "v": 0 } } - ] + } }, "ext": { "v": 0 @@ -561,13 +677,13 @@ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 5, + "lastModifiedLedgerSeq": 4, "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989700, - "seqNum": 3, + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 400000000, + "seqNum": 17179869184, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -575,31 +691,7 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 5, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -615,9 +707,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 3, + "accountID": 
"GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -625,57 +717,85 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, + "v": 0 + } + } + }, + "ext": { + "v": 0 + } + } + } + ], + "txApplyProcessing": { + "v": 3, + "v3": { + "ext": { + "v": 0 + }, + "txChangesBefore": [ + { + "type": "LEDGER_ENTRY_STATE", + "state": { + "lastModifiedLedgerSeq": 7, + "data": { + "type": "ACCOUNT", + "account": { + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 5, - "seqTime": 0 - } - } - } + "v": 0 + } + } + }, + "ext": { + "v": 0 + } + } + }, + { + "type": "LEDGER_ENTRY_UPDATED", + "updated": { + "lastModifiedLedgerSeq": 7, + "data": { + "type": "ACCOUNT", + "account": { + "accountID": "GCAEBM3GKNR6SV6N73FSGBXU6NSMZ2URQVMJQHXFQFY2PJPX6YBCSAKZ", + "balance": 399999700, + "seqNum": 17179869184, + "numSubEntries": 0, + "inflationDest": null, + "flags": 0, + "homeDomain": "", + "thresholds": "01000000", + "signers": [], + "ext": { + "v": 0 } } + }, + "ext": { + "v": 0 } } }, - "ext": { - "v": 0 - } - } - } - ], - "txApplyProcessing": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "txChangesBefore": [ { "type": "LEDGER_ENTRY_STATE", "state": { - "lastModifiedLedgerSeq": 7, + "lastModifiedLedgerSeq": 5, "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 3, + "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", + "balance": 200010000, + "seqNum": 21474836480, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -683,31 +803,7 @@ "thresholds": "01000000", "signers": [], "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 5, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -723,9 +819,9 @@ "data": { "type": "ACCOUNT", "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 4, + "accountID": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q", + "balance": 200010000, + "seqNum": 21474836481, "numSubEntries": 0, "inflationDest": null, "flags": 0, @@ -775,43 +871,18 @@ "state": { "lastModifiedLedgerSeq": 6, "data": { - "type": "ACCOUNT", - "account": { + "type": "TRUSTLINE", + "trustLine": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "balance": 399999900, - "seqNum": 12884901889, - "numSubEntries": 1, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 0, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 
0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 6, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -825,43 +896,18 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { + "type": "TRUSTLINE", + "trustLine": { "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", - "balance": 400000900, - "seqNum": 12884901889, - "numSubEntries": 1, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 50, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 6, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -869,49 +915,28 @@ "v": 0 } } - }, + } + ] + }, + { + "changes": [ { "type": "LEDGER_ENTRY_STATE", "state": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999989600, - "seqNum": 4, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "type": "TRUSTLINE", + "trustLine": { + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 50, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 7, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -925,43 +950,18 @@ "updated": { "lastModifiedLedgerSeq": 7, "data": { - "type": "ACCOUNT", - "account": { - "accountID": "GC4EFXBN6BEENDAX7PBW5PGIIIVH3INMD3OEPQASXOLGOHVVP7ZEMG7X", - "balance": 999999998999988600, - "seqNum": 4, - "numSubEntries": 0, - "inflationDest": null, - "flags": 0, - "homeDomain": "", - "thresholds": "01000000", - "signers": [], + "type": "TRUSTLINE", + "trustLine": { + "accountID": "GB6MXQ5262ZJGDQNA6BL4TWE5SADVZXIKLPELFXKUE27X4SQTGQS44ZB", + "asset": { + "assetCode": "CUR1", + "issuer": "GCGE27HU2VYQANKL2VZWLCAOJYMEFST5DXPBWQ7BRRPOHUPK626DNG4Q" + }, + "balance": 100, + "limit": 100, + "flags": 1, "ext": { - "v": 1, - "v1": { - "liabilities": { - "buying": 0, - "selling": 0 - }, - "ext": { - "v": 2, - "v2": { - "numSponsored": 0, - "numSponsoring": 0, - "signerSponsoringIDs": [], - "ext": { - "v": 3, - "v3": { - "ext": { - "v": 0 - }, - "seqLedger": 7, - "seqTime": 0 - } - } - } - } - } + "v": 0 } } }, @@ -981,7 +981,7 @@ ], "upgradesProcessing": [], "scpInfo": [], - "totalByteSizeOfBucketList": 1021, + "totalByteSizeOfBucketList": 1023, "evictedTemporaryLedgerKeys": [], "evictedPersistentLedgerEntries": [] } diff --git a/src/transactions/FeeBumpTransactionFrame.cpp b/src/transactions/FeeBumpTransactionFrame.cpp index 7e5e74e91f..975ed188c3 100644 --- a/src/transactions/FeeBumpTransactionFrame.cpp +++ b/src/transactions/FeeBumpTransactionFrame.cpp @@ -54,6 +54,20 @@ FeeBumpTransactionFrame::sorobanResources() const return mInnerTx->sorobanResources(); } 
+#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION +bool +FeeBumpTransactionFrame::hasSorobanProofs() const +{ + return mInnerTx->hasSorobanProofs(); +} + +xdr::xvector const& +FeeBumpTransactionFrame::sorobanProofs() const +{ + return mInnerTx->sorobanProofs(); +} +#endif + FeeBumpTransactionFrame::FeeBumpTransactionFrame( Hash const& networkID, TransactionEnvelope const& envelope) : mEnvelope(envelope) diff --git a/src/transactions/FeeBumpTransactionFrame.h b/src/transactions/FeeBumpTransactionFrame.h index 3190e31cfd..e1c32bbfaf 100644 --- a/src/transactions/FeeBumpTransactionFrame.h +++ b/src/transactions/FeeBumpTransactionFrame.h @@ -139,5 +139,10 @@ class FeeBumpTransactionFrame : public TransactionFrameBase SorobanResources const& sorobanResources() const override; virtual int64 declaredSorobanResourceFee() const override; virtual bool XDRProvidesValidFee() const override; + +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + bool hasSorobanProofs() const override; + xdr::xvector const& sorobanProofs() const override; +#endif }; } diff --git a/src/transactions/InvokeHostFunctionOpFrame.cpp b/src/transactions/InvokeHostFunctionOpFrame.cpp index 2f50f1db1e..e0d5b78c57 100644 --- a/src/transactions/InvokeHostFunctionOpFrame.cpp +++ b/src/transactions/InvokeHostFunctionOpFrame.cpp @@ -6,12 +6,14 @@ // This needs to be included first #include "TransactionUtils.h" #include "util/GlobalChecks.h" +#include "util/ProtocolVersion.h" #include "xdr/Stellar-ledger-entries.h" #include #include #include #include #include "xdr/Stellar-contract.h" +#include "util/ArchivalProofs.h" #include "rust/RustVecXdrMarshal.h" // clang-format on @@ -337,13 +339,14 @@ InvokeHostFunctionOpFrame::doApply( auto const& footprint = resources.footprint; auto footprintLength = footprint.readOnly.size() + footprint.readWrite.size(); + auto& bm = app.getBucketManager(); ledgerEntryCxxBufs.reserve(footprintLength); ttlEntryCxxBufs.reserve(footprintLength); - auto addReads = [&ledgerEntryCxxBufs, &ttlEntryCxxBufs, <x, &metrics, - &resources, &sorobanConfig, &appConfig, sorobanData, &res, - this](auto const& keys) -> bool { + auto addReads = [&app, &ledgerEntryCxxBufs, &ttlEntryCxxBufs, <x, + &metrics, &resources, &sorobanConfig, &appConfig, + sorobanData, &res, &bm, this](auto const& keys) -> bool { for (auto const& lk : keys) { uint32_t keySize = static_cast(xdr::xdr_size(lk)); @@ -397,6 +400,64 @@ InvokeHostFunctionOpFrame::doApply( } } // If ttlLtxe doesn't exist, this is a new Soroban entry +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + + // First check Hot Archive + if (isPersistentEntry(lk) && + protocolVersionStartsFrom( + ltx.getHeader().ledgerVersion, + Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION)) + { + auto hotArchive = + bm.getSearchableHotArchiveBucketListSnapshot(); + auto hotArchiveEntry = hotArchive->load(lk); + + // Entries require proofs only if an ARCHIVED entry + // exists in the hot archive + if (hotArchiveEntry && + hotArchiveEntry->type() != HOT_ARCHIVE_DELETED) + { + if (lk.type() == CONTRACT_CODE) + { + sorobanData->pushApplyTimeDiagnosticError( + appConfig, SCE_VALUE, SCEC_INVALID_INPUT, + "trying to access an archived contract " + "code " + "entry", + {makeBytesSCVal(lk.contractCode().hash)}); + } + else if (lk.type() == CONTRACT_DATA) + { + sorobanData->pushApplyTimeDiagnosticError( + appConfig, SCE_VALUE, SCEC_INVALID_INPUT, + "trying to access an archived contract " + "data " + "entry", + {makeAddressSCVal(lk.contractData().contract), + 
lk.contractData().key});
+                    }
+                    // Cannot access an archived entry
+                    this->innerResult(res).code(
+                        INVOKE_HOST_FUNCTION_ENTRY_ARCHIVED);
+                    return false;
+                }
+
+                if (!isCreatedKeyProven(app, lk, mParentTx.sorobanProofs()))
+                {
+                    sorobanData->pushApplyTimeDiagnosticError(
+                        appConfig, SCE_VALUE, SCEC_INVALID_INPUT,
+                        "invalid creation proof for new contract data "
+                        "entry",
+                        {makeAddressSCVal(lk.contractData().contract),
+                         lk.contractData().key});
+
+                    // TODO: Switch to new code once Rust XDR is updated
+                    this->innerResult(res).code(
+                        INVOKE_HOST_FUNCTION_ENTRY_ARCHIVED);
+                    return false;
+                }
+            }
+#endif
         }
         if (!isSorobanEntry(lk) || sorobanEntryLive)
@@ -718,6 +779,23 @@ InvokeHostFunctionOpFrame::doCheckValidForSoroban(
             return false;
         }
     }
+
+#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
+    if (protocolVersionStartsFrom(
+            ledgerVersion,
+            Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+    {
+        if (!mParentTx.hasSorobanProofs() ||
+            !checkCreationProofValidity(mParentTx.sorobanProofs()))
+        {
+            sorobanData.pushValidationTimeDiagnosticError(
+                appConfig, SCE_VALUE, SCEC_INVALID_INPUT,
+                "invalid creation proof");
+            return false;
+        }
+    }
+#endif
+
     return true;
 }
diff --git a/src/transactions/RestoreFootprintOpFrame.cpp b/src/transactions/RestoreFootprintOpFrame.cpp
index 1d697c59a7..2bd518837c 100644
--- a/src/transactions/RestoreFootprintOpFrame.cpp
+++ b/src/transactions/RestoreFootprintOpFrame.cpp
@@ -9,7 +9,11 @@
#include "medida/meter.h"
#include "medida/timer.h"
#include "transactions/MutableTransactionResult.h"
+#include "util/ArchivalProofs.h"
+#include "util/ProtocolVersion.h"
+#include "xdr/Stellar-ledger-entries.h"
#include
+#include
 namespace stellar
 {
@@ -76,25 +80,90 @@ RestoreFootprintOpFrame::doApply(
     rustEntryRentChanges.reserve(footprint.readWrite.size());
     for (auto const& lk : footprint.readWrite)
     {
+        std::shared_ptr<LedgerEntry> evictedEntryToRestore{nullptr};
         auto ttlKey = getTTLKey(lk);
         {
             auto constTTLLtxe = ltx.loadWithoutRecord(ttlKey);
-            // Skip entry if the TTLEntry is missing or if it's already live.
-            if (!constTTLLtxe || isLive(constTTLLtxe.current(), ledgerSeq))
+
+            // Before p23, entries that did not exist were skipped, but the
+            // transaction still succeeded.
+            if (protocolVersionIsBefore(
+                    ltx.getHeader().ledgerVersion,
+                    Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+            {
+                if (!constTTLLtxe)
+                {
+                    // Skip entry if it doesn't exist in protocol 22
+                    continue;
+                }
+                // Skip entry if it's already live.
+                else if (isLive(constTTLLtxe.current(), ledgerSeq))
+                {
+                    continue;
+                }
+            }
+            else
             {
-                continue;
+                // In protocol 23 and later, an entry that does not exist and
+                // cannot be recovered from the Hot Archive or a restoration
+                // proof causes the operation to fail.
+                if (!constTTLLtxe)
+                {
+                    evictedEntryToRestore = getRestoredEntryFromProof(
+                        app, lk, mParentTx.sorobanProofs());
+                    if (!evictedEntryToRestore)
+                    {
+                        // TODO: Switch to RESTORE_FOOTPRINT_INVALID_PROOF
+                        // when XDR changes in rust are merged
+                        innerResult(res).code(RESTORE_FOOTPRINT_MALFORMED);
+                        if (lk.type() == CONTRACT_CODE)
+                        {
+                            sorobanData->pushApplyTimeDiagnosticError(
+                                appConfig, SCE_VALUE, SCEC_INVALID_INPUT,
+                                "invalid proof for contract code entry",
+                                {makeBytesSCVal(lk.contractCode().hash)});
+                        }
+                        else
+                        {
+                            sorobanData->pushApplyTimeDiagnosticError(
+                                appConfig, SCE_VALUE, SCEC_INVALID_INPUT,
+                                "invalid proof for contract data entry",
+                                {makeAddressSCVal(lk.contractData().contract),
+                                 lk.contractData().key});
+                        }
+
+                        return false;
+                    }
+                }
+                // Skip entry if it's already live.
+                else if (isLive(constTTLLtxe.current(), ledgerSeq))
+                {
+                    continue;
+                }
             }
         }
         // We must load the ContractCode/ContractData entry for fee purposes, as
         // restore is considered a write
-        auto constEntryLtxe = ltx.loadWithoutRecord(lk);
+        uint32_t entrySize = 0;
+
+        // If entry exists in the Live BucketList and does not need to be
+        // created
+        if (!evictedEntryToRestore)
+        {
+            auto constEntryLtxe = ltx.loadWithoutRecord(lk);
-        // We checked for TTLEntry existence above
-        releaseAssertOrThrow(constEntryLtxe);
+            // We checked for TTLEntry existence above
+            releaseAssertOrThrow(constEntryLtxe);
+
+            entrySize =
+                static_cast<uint32_t>(xdr::xdr_size(constEntryLtxe.current()));
+        }
+        else
+        {
+            entrySize =
+                static_cast<uint32_t>(xdr::xdr_size(*evictedEntryToRestore));
+        }
-        uint32_t entrySize =
-            static_cast<uint32_t>(xdr::xdr_size(constEntryLtxe.current()));
         metrics.mLedgerReadByte += entrySize;
         if (resources.readBytes < metrics.mLedgerReadByte)
         {
@@ -138,11 +207,25 @@
         rustChange.new_size_bytes = entrySize;
         rustChange.new_live_until_ledger = restoredLiveUntilLedger;
-        // Entry exists if we get this this point due to the constTTLLtxe
-        // loadWithoutRecord logic above.
-        auto ttlLtxe = ltx.load(ttlKey);
-        ttlLtxe.current().data.ttl().liveUntilLedgerSeq =
-            restoredLiveUntilLedger;
+        if (evictedEntryToRestore)
+        {
+            // TODO: Pipe new "restored" change throughout ltx
+            // TODO: Delete entries from Hot Archive
+            ltx.create(*evictedEntryToRestore);
+            LedgerEntry ttl;
+            ttl.data.type(TTL);
+            ttl.data.ttl().liveUntilLedgerSeq = restoredLiveUntilLedger;
+            ttl.data.ttl().keyHash = getTTLKey(lk).ttl().keyHash;
+            ltx.create(ttl);
+        }
+        else
+        {
+            // Entry exists if we get to this point due to the constTTLLtxe
+            // loadWithoutRecord logic above.
+            auto ttlLtxe = ltx.load(ttlKey);
+            ttlLtxe.current().data.ttl().liveUntilLedgerSeq =
+                restoredLiveUntilLedger;
+        }
     }
     uint32_t ledgerVersion = ltx.loadHeader().current().ledgerVersion;
     int64_t rentFee = rust_bridge::compute_rent_fee(
@@ -193,6 +276,22 @@ RestoreFootprintOpFrame::doCheckValidForSoroban(
         }
     }
+
+#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
+    if (protocolVersionStartsFrom(
+            ledgerVersion,
+            Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+    {
+        if (!mParentTx.hasSorobanProofs() ||
+            !checkRestorationProofValidity(mParentTx.sorobanProofs()))
+        {
+            sorobanData.pushValidationTimeDiagnosticError(
+                appConfig, SCE_VALUE, SCEC_INVALID_INPUT,
+                "invalid restoration proof");
+            return false;
+        }
+    }
+#endif
+
     return true;
 }
diff --git a/src/transactions/TransactionFrame.cpp b/src/transactions/TransactionFrame.cpp
index 4e488625a9..7c74b951f9 100644
--- a/src/transactions/TransactionFrame.cpp
+++ b/src/transactions/TransactionFrame.cpp
@@ -56,6 +56,32 @@ namespace
 // Limit to the maximum resource fee allowed for transaction,
 // roughly 112 million lumens.
 int64_t const MAX_RESOURCE_FEE = 1LL << 50;
+
+// Starting in protocol 23, some operation meta needs to be modified
+// to be consumed by downstream systems. In particular, restoration is
+// logically a new entry creation from the perspective of ltx and stellar-core
+// as a whole, but this change type is reclassified to LEDGER_ENTRY_RESTORED
+// for easier consumption downstream.
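+// For example: a RestoreFootprintOp that re-creates an evicted entry E shows
+// up in ltx as LEDGER_ENTRY_CREATED(E); the helper below rewrites that change
+// to LEDGER_ENTRY_RESTORED(E) before it is emitted in the operation meta.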
+void
+processOpLedgerEntryChanges(std::shared_ptr<OperationFrame> op,
+                            LedgerEntryChanges& changes)
+{
+    if (op->getOperation().body.type() != RESTORE_FOOTPRINT)
+    {
+        return;
+    }
+
+    for (auto& change : changes)
+    {
+        if (change.type() == LEDGER_ENTRY_CREATED)
+        {
+            auto le = change.created();
+            change.type(LEDGER_ENTRY_RESTORED);
+            change.restored() = le;
+        }
+    }
+}
+
 } // namespace
 using namespace std;
@@ -401,6 +427,21 @@ TransactionFrame::sorobanResources() const
     return mEnvelope.v1().tx.ext.sorobanData().resources;
 }
+#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
+bool
+TransactionFrame::hasSorobanProofs() const
+{
+    return isSoroban() && mEnvelope.v1().tx.ext.sorobanData().ext.v() == 1;
+}
+
+xdr::xvector<ArchivalProof> const&
+TransactionFrame::sorobanProofs() const
+{
+    releaseAssertOrThrow(hasSorobanProofs());
+    return mEnvelope.v1().tx.ext.sorobanData().ext.proofs();
+}
+#endif
+
 MutableTxResultPtr
 TransactionFrame::createSuccessResultWithFeeCharged(
     LedgerHeader const& header, std::optional<int64_t> baseFee,
@@ -1629,7 +1670,15 @@ TransactionFrame::applyOperations(SignatureChecker& signatureChecker,
                     // The operation meta will be empty if the transaction
                     // doesn't succeed so we may as well not do any work in that
                     // case
-                    operationMetas.emplace_back(ltxOp.getChanges());
+                    auto changes = ltxOp.getChanges();
+
+                    if (protocolVersionStartsFrom(
+                            ledgerVersion,
+                            Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+                    {
+                        processOpLedgerEntryChanges(op, changes);
+                    }
+                    operationMetas.emplace_back(changes);
                 }
                 if (txRes ||
diff --git a/src/transactions/TransactionFrame.h b/src/transactions/TransactionFrame.h
index 4cb02a0e7c..8745a0e227 100644
--- a/src/transactions/TransactionFrame.h
+++ b/src/transactions/TransactionFrame.h
@@ -287,6 +287,11 @@ class TransactionFrame : public TransactionFrameBase
     virtual int64 declaredSorobanResourceFee() const override;
     virtual bool XDRProvidesValidFee() const override;
+#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
+    bool hasSorobanProofs() const override;
+    xdr::xvector<ArchivalProof> const& sorobanProofs() const override;
+#endif
+
#ifdef BUILD_TESTS
     friend class TransactionTestFrame;
#endif
diff --git a/src/transactions/TransactionFrameBase.h b/src/transactions/TransactionFrameBase.h
index a3db674fce..b14ab9dbf8 100644
--- a/src/transactions/TransactionFrameBase.h
+++ b/src/transactions/TransactionFrameBase.h
@@ -113,5 +113,10 @@ class TransactionFrameBase
     virtual SorobanResources const& sorobanResources() const = 0;
     virtual int64 declaredSorobanResourceFee() const = 0;
     virtual bool XDRProvidesValidFee() const = 0;
+
+#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
+    virtual bool hasSorobanProofs() const = 0;
+    virtual xdr::xvector<ArchivalProof> const& sorobanProofs() const = 0;
+#endif
 };
 }
diff --git a/src/transactions/TransactionSQL.cpp b/src/transactions/TransactionSQL.cpp
index ff37172218..9e87fb56a8 100644
--- a/src/transactions/TransactionSQL.cpp
+++ b/src/transactions/TransactionSQL.cpp
@@ -363,19 +363,9 @@ storeTransaction(Database& db, uint32_t ledgerSeq,
     uint32_t txIndex = static_cast<uint32_t>(resultSet.results.size());
     std::string sqlStr;
-    if (cfg.isUsingBucketListDB())
-    {
-        sqlStr = "INSERT INTO txhistory "
-                 "( txid, ledgerseq, txindex, txbody, txresult) VALUES "
-                 "(:id, :seq, :txindex, :txb, :txres)";
-    }
-    else
-    {
-        sqlStr =
-            "INSERT INTO txhistory "
-            "( txid, ledgerseq, txindex, txbody, txresult, txmeta) VALUES "
-            "(:id, :seq, :txindex, :txb, :txres, :meta)";
-    }
+    sqlStr = "INSERT INTO txhistory "
+             "( txid, ledgerseq, txindex, txbody, txresult) VALUES "
+             
"(:id, :seq, :txindex, :txb, :txres)"; auto prep = db.getPreparedStatement(sqlStr); auto& st = prep.statement(); @@ -385,11 +375,6 @@ storeTransaction(Database& db, uint32_t ledgerSeq, st.exchange(soci::use(txBody)); st.exchange(soci::use(txResult)); - if (!cfg.isUsingBucketListDB()) - { - st.exchange(soci::use(meta)); - } - st.define_and_bind(); { auto timer = db.getInsertTimer("txhistory"); @@ -581,20 +566,14 @@ dropTransactionHistory(Database& db, Config const& cfg) { ZoneScoped; db.getSession() << "DROP TABLE IF EXISTS txhistory"; - - // txmeta only supported when BucketListDB is not enabled - std::string txMetaColumn = - cfg.isUsingBucketListDB() ? "" : "txmeta TEXT NOT NULL,"; - db.getSession() << "CREATE TABLE txhistory (" "txid CHARACTER(64) NOT NULL," "ledgerseq INT NOT NULL CHECK (ledgerseq >= 0)," "txindex INT NOT NULL," "txbody TEXT NOT NULL," - "txresult TEXT NOT NULL," + - txMetaColumn + - "PRIMARY KEY (ledgerseq, txindex)" - ")"; + "txresult TEXT NOT NULL," + "PRIMARY KEY (ledgerseq, txindex)" + ")"; db.getSession() << "CREATE INDEX histbyseq ON txhistory (ledgerseq);"; diff --git a/src/transactions/test/AllowTrustTests.cpp b/src/transactions/test/AllowTrustTests.cpp index 398bee5e28..43c25f3824 100644 --- a/src/transactions/test/AllowTrustTests.cpp +++ b/src/transactions/test/AllowTrustTests.cpp @@ -82,7 +82,7 @@ template struct TestStub TrustFlagOp flagOp = V == 0 ? TrustFlagOp::ALLOW_TRUST : TrustFlagOp::SET_TRUST_LINE_FLAGS; - auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); @@ -377,7 +377,7 @@ template struct TestStub TrustFlagOp flagOp = V == 0 ? TrustFlagOp::ALLOW_TRUST : TrustFlagOp::SET_TRUST_LINE_FLAGS; - auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/BumpSequenceTests.cpp b/src/transactions/test/BumpSequenceTests.cpp index f8a43d42ca..9a09b171f2 100644 --- a/src/transactions/test/BumpSequenceTests.cpp +++ b/src/transactions/test/BumpSequenceTests.cpp @@ -25,7 +25,7 @@ using namespace stellar::txtest; TEST_CASE_VERSIONS("bump sequence", "[tx][bumpsequence]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/ChangeTrustTests.cpp b/src/transactions/test/ChangeTrustTests.cpp index fcb09d4af6..e6e021264d 100644 --- a/src/transactions/test/ChangeTrustTests.cpp +++ b/src/transactions/test/ChangeTrustTests.cpp @@ -23,7 +23,7 @@ using namespace stellar::txtest; TEST_CASE_VERSIONS("change trust", "[tx][changetrust]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); @@ -303,7 +303,7 @@ TEST_CASE_VERSIONS("change trust", "[tx][changetrust]") TEST_CASE_VERSIONS("change trust pool share trustline", "[tx][changetrust][liquiditypool]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git 
a/src/transactions/test/ClaimableBalanceTests.cpp b/src/transactions/test/ClaimableBalanceTests.cpp index f90e37633b..afc076258a 100644 --- a/src/transactions/test/ClaimableBalanceTests.cpp +++ b/src/transactions/test/ClaimableBalanceTests.cpp @@ -298,7 +298,7 @@ validateBalancesOnCreateAndClaim(TestAccount& createAcc, TestAccount& claimAcc, TEST_CASE_VERSIONS("claimableBalance", "[tx][claimablebalance]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/ClawbackClaimableBalanceTests.cpp b/src/transactions/test/ClawbackClaimableBalanceTests.cpp index 195dd9aee0..b43bc5a015 100644 --- a/src/transactions/test/ClawbackClaimableBalanceTests.cpp +++ b/src/transactions/test/ClawbackClaimableBalanceTests.cpp @@ -19,7 +19,7 @@ using namespace stellar::txtest; TEST_CASE_VERSIONS("clawbackClaimableBalance", "[tx][clawback][claimablebalance]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/ClawbackTests.cpp b/src/transactions/test/ClawbackTests.cpp index eee797d441..f0238c35f1 100644 --- a/src/transactions/test/ClawbackTests.cpp +++ b/src/transactions/test/ClawbackTests.cpp @@ -17,7 +17,7 @@ using namespace stellar::txtest; TEST_CASE_VERSIONS("clawback", "[tx][clawback]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/CreateAccountTests.cpp b/src/transactions/test/CreateAccountTests.cpp index fb8ed2e424..aedb03989c 100644 --- a/src/transactions/test/CreateAccountTests.cpp +++ b/src/transactions/test/CreateAccountTests.cpp @@ -31,7 +31,7 @@ TEST_CASE_VERSIONS("create account", "[tx][createaccount]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); // set up world auto root = TestAccount::createRoot(*app); diff --git a/src/transactions/test/EndSponsoringFutureReservesTests.cpp b/src/transactions/test/EndSponsoringFutureReservesTests.cpp index a92b6e2281..9220ac2617 100644 --- a/src/transactions/test/EndSponsoringFutureReservesTests.cpp +++ b/src/transactions/test/EndSponsoringFutureReservesTests.cpp @@ -34,7 +34,7 @@ TEST_CASE_VERSIONS("confirm and clear sponsor", "[tx][sponsorship]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); auto root = TestAccount::createRoot(*app); int64_t minBalance = app->getLedgerManager().getLastMinBalance(0); diff --git a/src/transactions/test/FeeBumpTransactionTests.cpp b/src/transactions/test/FeeBumpTransactionTests.cpp index c645829d61..efc2ec212e 100644 --- a/src/transactions/test/FeeBumpTransactionTests.cpp +++ b/src/transactions/test/FeeBumpTransactionTests.cpp @@ -66,7 +66,7 @@ TEST_CASE_VERSIONS("fee bump transactions", "[tx][feebump]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); auto& lm = app->getLedgerManager(); auto fee 
= lm.getLastClosedLedgerHeader().header.baseFee; diff --git a/src/transactions/test/InflationTests.cpp b/src/transactions/test/InflationTests.cpp index dbf2d8feef..f5cc0697f0 100644 --- a/src/transactions/test/InflationTests.cpp +++ b/src/transactions/test/InflationTests.cpp @@ -432,7 +432,7 @@ TEST_CASE_VERSIONS("inflation total coins", "[tx][inflation]") TEST_CASE_VERSIONS("inflation", "[tx][inflation]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock::system_time_point inflationStart; // inflation starts on 1-jul-2014 diff --git a/src/transactions/test/InvokeHostFunctionTests.cpp b/src/transactions/test/InvokeHostFunctionTests.cpp index c7c19295fc..a7202aadd3 100644 --- a/src/transactions/test/InvokeHostFunctionTests.cpp +++ b/src/transactions/test/InvokeHostFunctionTests.cpp @@ -2,7 +2,10 @@ // under the Apache License, Version 2.0. See the COPYING file at the root // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0 +#include "test/test.h" #include "util/Logging.h" +#include "util/ProtocolVersion.h" +#include "util/UnorderedSet.h" #include "xdr/Stellar-transaction.h" #include #include @@ -28,6 +31,7 @@ #include "transactions/TransactionUtils.h" #include "transactions/test/SorobanTxTestUtils.h" #include "transactions/test/SponsorshipTestUtils.h" +#include "util/ArchivalProofs.h" #include "util/Decoder.h" #include "util/TmpDir.h" #include "util/XDRCereal.h" @@ -1192,15 +1196,27 @@ TEST_CASE_VERSIONS("refund still happens on bad auth", "[tx][soroban]") auto a1PostTxBalance = a1.getBalance(); - bool afterV20 = protocolVersionStartsFrom( - getLclProtocolVersion(test.getApp()), ProtocolVersion::V_21); - - auto fee = afterV20 ? 62697 : 39288; + uint64_t txFeeWithRefund = 0; + if (protocolVersionStartsFrom(getLclProtocolVersion(test.getApp()), + ProtocolVersion::V_23)) + { + // Slightly larger TX size due to proofs + txFeeWithRefund = 62'706; + } + else if (protocolVersionStartsFrom(getLclProtocolVersion(test.getApp()), + ProtocolVersion::V_21)) + { + txFeeWithRefund = 62'697; + } + else + { + txFeeWithRefund = 39'288; + } // The initial fee charge is based on DEFAULT_TEST_RESOURCE_FEE, which // is 1'000'000, so the difference would be much higher if the refund // did not happen. - REQUIRE(a1PreTxBalance - a1PostTxBalance == fee); + REQUIRE(a1PreTxBalance - a1PostTxBalance == txFeeWithRefund); }); } @@ -1229,10 +1245,23 @@ TEST_CASE_VERSIONS("refund test with closeLedger", "[tx][soroban][feebump]") auto r = closeLedger(test.getApp(), {tx}); checkTx(0, r, txSUCCESS); - bool afterV20 = protocolVersionStartsFrom( - getLclProtocolVersion(test.getApp()), ProtocolVersion::V_21); + uint64_t txFeeWithRefund = 0; + if (protocolVersionStartsFrom(getLclProtocolVersion(test.getApp()), + ProtocolVersion::V_23)) + { + // Slightly larger TX size due to proofs + txFeeWithRefund = 82'762; + } + else if (protocolVersionStartsFrom(getLclProtocolVersion(test.getApp()), + ProtocolVersion::V_21)) + { + txFeeWithRefund = 82'753; + } + else + { + txFeeWithRefund = 59'344; + } - auto txFeeWithRefund = afterV20 ? 82'753 : 59'344; REQUIRE(a1.getBalance() == a1StartingBalance - txFeeWithRefund); // DEFAULT_TEST_RESOURCE_FEE is added onto the calculated soroban @@ -1287,7 +1316,23 @@ TEST_CASE_VERSIONS("refund is sent to fee-bump source", bool afterV20 = protocolVersionStartsFrom( getLclProtocolVersion(test.getApp()), ProtocolVersion::V_21); - auto const txFeeWithRefund = afterV20 ? 
82'853 : 59'444;
+        uint64_t txFeeWithRefund = 0;
+        if (protocolVersionStartsFrom(getLclProtocolVersion(test.getApp()),
+                                      ProtocolVersion::V_23))
+        {
+            // Slightly larger TX size due to proofs
+            txFeeWithRefund = 82'862;
+        }
+        else if (protocolVersionStartsFrom(getLclProtocolVersion(test.getApp()),
+                                           ProtocolVersion::V_21))
+        {
+            txFeeWithRefund = 82'853;
+        }
+        else
+        {
+            txFeeWithRefund = 59'444;
+        }
+
         auto const feeCharged = afterV20 ? txFeeWithRefund : 1'040'971;
         REQUIRE(
@@ -1912,7 +1957,7 @@ TEST_CASE("ledger entry size limit enforced", "[tx][soroban]")
     }
 }
-TEST_CASE("contract storage", "[tx][soroban]")
+TEST_CASE("contract storage", "[tx][soroban][archival]")
 {
     auto modifyCfg = [](SorobanNetworkConfig& cfg) {
         // Increase write fee so the fee will be greater than 1
@@ -2070,7 +2115,7 @@
     }
 }
-TEST_CASE("state archival", "[tx][soroban]")
+TEST_CASE("state archival", "[tx][soroban][archival]")
 {
     SorobanTest test(getTestConfig(), true, [](SorobanNetworkConfig& cfg) {
         cfg.mWriteFee1KBBucketListLow = 20'000;
@@ -2551,32 +2596,26 @@
     }
 }
-TEST_CASE("temp entry eviction", "[tx][soroban]")
+TEST_CASE_VERSIONS("entry eviction", "[tx][soroban][archival]")
 {
-    auto test = [](bool enableBucketListDB, bool backgroundEviction) {
-        if (backgroundEviction && !enableBucketListDB)
+    auto test = [](UnorderedSet<uint32_t> versionsToTest) {
+        // Currently, SorobanTest does not support for_versions, so we test all
+        // versions and return early if the version is not in versionsToTest. We
+        // should fix this at some point, but the refactor is non-trivial.
+        Config cfg = getTestConfig();
+
+        if (versionsToTest.find(cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION) ==
+            versionsToTest.end())
         {
-            throw "testing error: backgroundEviction requires "
-                  "enableBucketListDB == true";
+            return;
         }
-        Config cfg = getTestConfig();
         TmpDirManager tdm(std::string("soroban-storage-meta-") +
                           binToHex(randomBytes(8)));
         TmpDir td = tdm.tmpDir("soroban-meta-ok");
         std::string metaPath = td.getName() + "/stream.xdr";
         cfg.METADATA_OUTPUT_STREAM = metaPath;
-        cfg.DEPRECATED_SQL_LEDGER_STATE = !enableBucketListDB;
-        cfg.BACKGROUND_EVICTION_SCAN = backgroundEviction;
-
-        // overrideSorobanNetworkConfigForTest commits directly to the
-        // database, will not work if BucketListDB is enabled so we must use
-        // the cfg override
-        if (enableBucketListDB)
-        {
-            cfg.TESTING_SOROBAN_HIGH_LIMIT_OVERRIDE = true;
-        }
         SorobanTest test(cfg);
         ContractStorageTestClient client(test);
@@ -2617,7 +2656,7 @@
         REQUIRE(!test.isEntryLive(lk, test.getLCLSeq()));
-        SECTION("eviction")
+        SECTION("temp entry meta")
         {
             // close one more ledger to trigger the eviction
             closeLedgerOn(test.getApp(), evictionLedger, 2, 1, 2016);
@@ -2652,6 +2691,195 @@
             REQUIRE(evicted);
         }
+#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
+        SECTION("persistent entry meta")
+        {
+            auto persistentInvocation = client.getContract().prepareInvocation(
+                "put_persistent", {makeSymbolSCVal("key"), makeU64SCVal(123)},
+                client.writeKeySpec("key", ContractDataDurability::PERSISTENT));
+            REQUIRE(persistentInvocation.withExactNonRefundableResourceFee()
+                        .invoke());
+
+            auto persistentKey = client.getContract().getDataKey(
+                makeSymbolSCVal("key"), ContractDataDurability::PERSISTENT);
+
+            LedgerEntry persistentLE;
+            {
+                LedgerTxn ltx(test.getApp().getLedgerTxnRoot());
+                auto ltxe = 
ltx.load(persistentKey);
+                REQUIRE(ltxe);
+                persistentLE = ltxe.current();
+            }
+
+            // Entry must merge down the BucketList until it is in the first
+            // scan level
+            auto evictionLedger = 8193;
+
+            // Close ledgers until entry is evicted
+            for (uint32_t i = test.getLCLSeq(); i <= evictionLedger; ++i)
+            {
+                closeLedgerOn(test.getApp(), i, 2, 1, 2016);
+            }
+
+            if (protocolVersionStartsFrom(
+                    cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION,
+                    Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+            {
+                LedgerTxn ltx(test.getApp().getLedgerTxnRoot());
+                REQUIRE(!ltx.load(persistentKey));
+            }
+
+            SECTION("eviction meta")
+            {
+                XDRInputFileStream in;
+                in.open(metaPath);
+                LedgerCloseMeta lcm;
+                bool evicted = false;
+                while (in.readOne(lcm))
+                {
+                    REQUIRE(lcm.v() == 1);
+                    if (lcm.v1().ledgerHeader.header.ledgerSeq ==
+                        evictionLedger)
+                    {
+                        // Only support persistent eviction meta >= p23
+                        if (protocolVersionStartsFrom(
+                                cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION,
+                                Bucket::
+                                    FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+                        {
+                            // TTL should be in "deleted" key section (called
+                            // evictedTemporaryLedgerKeys for legacy reasons).
+                            REQUIRE(
+                                lcm.v1().evictedTemporaryLedgerKeys.size() ==
+                                1);
+                            REQUIRE(
+                                lcm.v1().evictedTemporaryLedgerKeys.front() ==
+                                getTTLKey(persistentKey));
+
+                            REQUIRE(
+                                lcm.v1()
+                                    .evictedPersistentLedgerEntries.size() ==
+                                1);
+                            REQUIRE(
+                                lcm.v1()
+                                    .evictedPersistentLedgerEntries.front() ==
+                                persistentLE);
+                            evicted = true;
+                        }
+                        else
+                        {
+                            REQUIRE(
+                                lcm.v1().evictedTemporaryLedgerKeys.empty());
+                            REQUIRE(
+                                lcm.v1()
+                                    .evictedPersistentLedgerEntries.empty());
+                            evicted = false;
+                        }
+
+                        break;
+                    }
+                }
+
+                if (protocolVersionStartsFrom(
+                        cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION,
+                        Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+                {
+                    REQUIRE(evicted);
+                }
+                else
+                {
+                    REQUIRE(!evicted);
+                }
+            }
+
+            SECTION("Restoration Meta")
+            {
+                test.invokeRestoreOp({persistentKey}, 20'048);
+                auto targetRestorationLedger = test.getLCLSeq();
+
+                XDRInputFileStream in;
+                in.open(metaPath);
+                LedgerCloseMeta lcm;
+                bool restoreMeta = false;
+
+                LedgerKeySet keysToRestore = {persistentKey,
+                                              getTTLKey(persistentKey)};
+                while (in.readOne(lcm))
+                {
+                    REQUIRE(lcm.v() == 1);
+                    if (lcm.v1().ledgerHeader.header.ledgerSeq ==
+                        targetRestorationLedger)
+                    {
+                        REQUIRE(lcm.v1().evictedTemporaryLedgerKeys.empty());
+                        REQUIRE(
+                            lcm.v1().evictedPersistentLedgerEntries.empty());
+
+                        REQUIRE(lcm.v1().txProcessing.size() == 1);
+                        auto txMeta = lcm.v1().txProcessing.front();
+                        REQUIRE(
+                            txMeta.txApplyProcessing.v3().operations.size() ==
+                            1);
+
+                        REQUIRE(txMeta.txApplyProcessing.v3()
+                                    .operations[0]
+                                    .changes.size() == 2);
+                        for (auto const& change : txMeta.txApplyProcessing.v3()
+                                                      .operations[0]
+                                                      .changes)
+                        {
+
+                            // Only support persistent eviction meta >= p23
+                            LedgerKey lk;
+                            if (protocolVersionStartsFrom(
+                                    cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION,
+                                    Bucket::
+                                        FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+                            {
+                                REQUIRE(change.type() ==
+                                        LedgerEntryChangeType::
+                                            LEDGER_ENTRY_RESTORED);
+                                lk = LedgerEntryKey(change.restored());
+                                REQUIRE(keysToRestore.find(lk) !=
+                                        keysToRestore.end());
+                                keysToRestore.erase(lk);
+                            }
+                            else
+                            {
+                                if (change.type() ==
+                                    LedgerEntryChangeType::LEDGER_ENTRY_STATE)
+                                {
+                                    lk = LedgerEntryKey(change.state());
+                                    REQUIRE(lk == getTTLKey(persistentKey));
+                                    keysToRestore.erase(lk);
+                                }
+                                else
+                                {
+                                    REQUIRE(change.type() ==
+                                            LedgerEntryChangeType::
+                                                LEDGER_ENTRY_UPDATED);
+                                    lk = LedgerEntryKey(change.updated());
+                                    REQUIRE(lk == 
getTTLKey(persistentKey)); + + // While we will see the TTL key twice, + // remove the TTL key in the path above and + // the persistent key here to make the check + // easier + keysToRestore.erase(persistentKey); + } + } + } + + restoreMeta = true; + break; + } + } + + REQUIRE(restoreMeta); + REQUIRE(keysToRestore.empty()); + } + } +#endif + SECTION( "Create temp entry with same key as an expired entry on eviction " "ledger") @@ -2681,26 +2909,15 @@ TEST_CASE("temp entry eviction", "[tx][soroban]") } }; - SECTION("sql") - { - test(/*enableBucketListDB=*/false, /*backgroundEviction=*/false); - } - - SECTION("BucketListDB") - { - SECTION("legacy main thread scan") - { - test(/*enableBucketListDB=*/true, /*backgroundEviction=*/false); - } - - SECTION("background scan") - { - test(/*enableBucketListDB=*/true, /*backgroundEviction=*/true); - } - } + test({22 +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + , + 23 +#endif + }); } -TEST_CASE("state archival operation errors", "[tx][soroban]") +TEST_CASE("state archival operation errors", "[tx][soroban][archival]") { SorobanTest test; ContractStorageTestClient client(test); @@ -2831,6 +3048,188 @@ TEST_CASE("state archival operation errors", "[tx][soroban]") } } +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION +TEST_CASE("evicted persistent entries", "[tx][soroban][archival]") +{ + auto test = [](bool requireProofs) { + auto cfg = getTestConfig(); + cfg.REQUIRE_PROOFS_FOR_ALL_EVICTED_ENTRIES = requireProofs; + SorobanTest test(cfg, true, [](SorobanNetworkConfig& cfg) { + cfg.stateArchivalSettings().startingEvictionScanLevel = 1; + cfg.stateArchivalSettings().minPersistentTTL = 4; + }); + + ContractStorageTestClient client(test); + + // WASM and instance should not expire + test.invokeExtendOp(client.getContract().getKeys(), 10'000); + + auto writeInvocation = client.getContract().prepareInvocation( + "put_persistent", {makeSymbolSCVal("key"), makeU64SCVal(123)}, + client.writeKeySpec("key", ContractDataDurability::PERSISTENT)); + REQUIRE(writeInvocation.withExactNonRefundableResourceFee().invoke()); + auto lk = client.getContract().getDataKey( + makeSymbolSCVal("key"), ContractDataDurability::PERSISTENT); + + auto hotArchive = test.getApp() + .getBucketManager() + .getBucketSnapshotManager() + .copySearchableHotArchiveBucketListSnapshot(); + + auto evictionLedger = 14; + + // Close ledgers until entry is evicted + for (uint32_t i = test.getLCLSeq(); i < evictionLedger; ++i) + { + closeLedgerOn(test.getApp(), i, 2, 1, 2016); + } + + REQUIRE(hotArchive->load(lk)); + REQUIRE(!hotArchive->load(getTTLKey(lk))); + { + LedgerTxn ltx(test.getApp().getLedgerTxnRoot()); + REQUIRE(!ltx.load(lk)); + REQUIRE(!ltx.load(getTTLKey(lk))); + } + + // Rewriting entry should fail since key is in Hot Archive + REQUIRE(!writeInvocation.withExactNonRefundableResourceFee().invoke()); + + // Reads should also fail + REQUIRE(client.has("key", ContractDataDurability::PERSISTENT, + std::nullopt) == + INVOKE_HOST_FUNCTION_ENTRY_ARCHIVED); + + // Rent extension is a no op + SorobanResources resources; + resources.footprint.readOnly = {lk}; + + resources.instructions = 0; + resources.readBytes = 1000; + resources.writeBytes = 1000; + auto resourceFee = 1'000'000; + + auto extendTx = + test.createExtendOpTx(resources, 100, 1'000, resourceFee); + auto result = test.invokeTx(extendTx); + REQUIRE(isSuccessResult(result)); + + REQUIRE(client.has("key", ContractDataDurability::PERSISTENT, + std::nullopt) == + INVOKE_HOST_FUNCTION_ENTRY_ARCHIVED); + + // 
Refundable fee should be identical in proof and non-proof case
+        auto const expectedRefundableFeeCharged = 20'048;
+
+        if (requireProofs)
+        {
+            // Proof generation should fail for keys that don't exist
+            xdr::xvector<ArchivalProof> proofs;
+            auto wrongLk = client.getContract().getDataKey(
+                makeSymbolSCVal("null"), ContractDataDurability::PERSISTENT);
+            REQUIRE(!addRestorationProof(hotArchive, wrongLk, proofs));
+
+            // RestoreOp should fail with no proof / an empty proof
+            SorobanResources restoreResources;
+            restoreResources.footprint.readWrite = {lk};
+
+            restoreResources.instructions = 0;
+            restoreResources.readBytes = 1000;
+            restoreResources.writeBytes = 1000;
+
+            auto badRestore =
+                test.createRestoreTx(restoreResources, 100'000, 1'000'000);
+            auto restoreResult = test.invokeTx(badRestore);
+            REQUIRE(restoreResult.result.code() == txFAILED);
+
+            // TODO: Switch to RESTORE_FOOTPRINT_INVALID_PROOF
+            // when XDR changes in rust are merged
+            REQUIRE(restoreResult.result.results()[0]
+                        .tr()
+                        .restoreFootprintResult()
+                        .code() == RESTORE_FOOTPRINT_MALFORMED);
+
+            REQUIRE(addRestorationProof(hotArchive, lk, proofs));
+
+            // Should succeed after adding proper proofs
+            test.invokeRestoreOp({lk}, expectedRefundableFeeCharged, proofs);
+        }
+        else
+        {
+            // Restore should succeed without proof for hot archive entries
+            test.invokeRestoreOp({lk}, expectedRefundableFeeCharged);
+        }
+
+        auto const& stateArchivalSettings =
+            test.getNetworkCfg().stateArchivalSettings();
+        auto newExpectedLiveUntilLedger =
+            test.getLCLSeq() + stateArchivalSettings.minPersistentTTL - 1;
+        REQUIRE(test.getTTL(lk) == newExpectedLiveUntilLedger);
+
+        client.get("key", ContractDataDurability::PERSISTENT, 123);
+
+        // Hot Archive entry removed after restoration
+        REQUIRE(!hotArchive->load(lk));
+
+        client.del("key", ContractDataDurability::PERSISTENT);
+        client.has("key", ContractDataDurability::PERSISTENT, false);
+
+        // Hot archive should record deletion
+        auto hotArchiveEntry = hotArchive->load(lk);
+        REQUIRE(hotArchiveEntry);
+        REQUIRE(hotArchiveEntry->type() == HOT_ARCHIVE_DELETED);
+        REQUIRE(hotArchiveEntry->key() == lk);
+
+        // Recreation should remove entry from hot archive
+        client.put("key", ContractDataDurability::PERSISTENT, 345);
+        client.get("key", ContractDataDurability::PERSISTENT, 345);
+        REQUIRE(!hotArchive->load(lk));
+    };
+
+    SECTION("with proofs")
+    {
+        test(true);
+    }
+
+    SECTION("restore hot archive entries")
+    {
+        test(false);
+    }
+}
+
+TEST_CASE("persistent entry archival filters", "[soroban][archival]")
+{
+    auto cfg = getTestConfig();
+    cfg.ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS = true;
+
+    SorobanTest test(cfg);
+    ContractStorageTestClient client(test);
+
+    auto writeInvocation = client.getContract().prepareInvocation(
+        "put_persistent", {makeSymbolSCVal("miss"), makeU64SCVal(123)},
+        client.writeKeySpec("miss", ContractDataDurability::PERSISTENT));
+
+    // Invocation should fail when no proof is provided
+    REQUIRE(!writeInvocation.withExactNonRefundableResourceFee().invoke());
+
+    // TODO: Replace with proper error code once Rust XDR has been generated
+    REQUIRE(*writeInvocation.getResultCode() ==
+            INVOKE_HOST_FUNCTION_ENTRY_ARCHIVED);
+
+    xdr::xvector<ArchivalProof> proofs;
+    addCreationProof(
+        cfg.ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS,
+        client.getContract().getDataKey(makeSymbolSCVal("miss"),
+                                        ContractDataDurability::PERSISTENT),
+        proofs);
+
+    REQUIRE(writeInvocation.withProofs(proofs)
+                .withExactNonRefundableResourceFee()
+                .invoke());
+}
+
+#endif
+
 /* This test uses the same utils (SettingsUpgradeUtils.h) as 
the get-settings-upgrade-txs command to make sure the transactions have the @@ -2839,7 +3238,7 @@ TEST_CASE("state archival operation errors", "[tx][soroban]") TEST_CASE("settings upgrade command line utils", "[tx][soroban][upgrades]") { VirtualClock clock; - auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); cfg.ENABLE_SOROBAN_DIAGNOSTIC_EVENTS = true; auto app = createTestApplication(clock, cfg); auto root = TestAccount::createRoot(*app); @@ -3143,6 +3542,10 @@ TEST_CASE("settings upgrade command line utils", "[tx][soroban][upgrades]") sha256(xdr::xdr_to_opaque(app->getNetworkID(), ENVELOPE_TYPE_TX, txEnv.v1().tx)))); +#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION + txEnv.v1().tx.ext.sorobanData().ext.v(1); +#endif + auto const& rawTx = TransactionFrameBase::makeTransactionFromWire( app->getNetworkID(), txEnv); auto tx = TransactionTestFrame::fromTxFrame(rawTx); diff --git a/src/transactions/test/LiquidityPoolDepositTests.cpp b/src/transactions/test/LiquidityPoolDepositTests.cpp index 2bf6cd413a..1b8b899eaf 100644 --- a/src/transactions/test/LiquidityPoolDepositTests.cpp +++ b/src/transactions/test/LiquidityPoolDepositTests.cpp @@ -18,7 +18,7 @@ TEST_CASE_VERSIONS("liquidity pool deposit", "[tx][liquiditypool]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); // set up world auto const& lm = app->getLedgerManager(); diff --git a/src/transactions/test/LiquidityPoolTradeTests.cpp b/src/transactions/test/LiquidityPoolTradeTests.cpp index 9cddacf59b..12b0ab3779 100644 --- a/src/transactions/test/LiquidityPoolTradeTests.cpp +++ b/src/transactions/test/LiquidityPoolTradeTests.cpp @@ -983,7 +983,7 @@ TEST_CASE_VERSIONS("liquidity pool trade", "[tx][liquiditypool]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); // set up world auto minBal = [&](int32_t n) { diff --git a/src/transactions/test/LiquidityPoolWithdrawTests.cpp b/src/transactions/test/LiquidityPoolWithdrawTests.cpp index a6cb9b6c77..df3acf8b3d 100644 --- a/src/transactions/test/LiquidityPoolWithdrawTests.cpp +++ b/src/transactions/test/LiquidityPoolWithdrawTests.cpp @@ -17,7 +17,7 @@ using namespace stellar::txtest; TEST_CASE_VERSIONS("liquidity pool withdraw", "[tx][liquiditypool]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/ManageBuyOfferTests.cpp b/src/transactions/test/ManageBuyOfferTests.cpp index 377794b5b0..8eb416e7b9 100644 --- a/src/transactions/test/ManageBuyOfferTests.cpp +++ b/src/transactions/test/ManageBuyOfferTests.cpp @@ -47,7 +47,7 @@ TEST_CASE_VERSIONS("manage buy offer failure modes", "[tx][offers]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = @@ -354,7 +354,7 @@ TEST_CASE_VERSIONS("manage buy offer liabilities", "[tx][offers]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, 
Config::TESTDB_IN_MEMORY)); auto checkLiabilities = [&](std::string const& section, int64_t buyAmount, Price const& price, int64_t expectedBuying, @@ -438,7 +438,7 @@ TEST_CASE_VERSIONS("manage buy offer exactly crosses existing offers", { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = @@ -491,7 +491,7 @@ TEST_CASE_VERSIONS( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = @@ -619,7 +619,7 @@ TEST_CASE_VERSIONS( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = @@ -774,7 +774,7 @@ TEST_CASE_VERSIONS( { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = @@ -927,7 +927,7 @@ TEST_CASE_VERSIONS("manage buy offer with zero liabilities", "[tx][offers]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = @@ -983,7 +983,7 @@ TEST_CASE_VERSIONS("manage buy offer releases liabilities before modify", { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); int64_t const txfee = app->getLedgerManager().getLastTxFee(); int64_t const minBalancePlusFees = diff --git a/src/transactions/test/ManageDataTests.cpp b/src/transactions/test/ManageDataTests.cpp index d1b5dbcfe4..770ba6f2e5 100644 --- a/src/transactions/test/ManageDataTests.cpp +++ b/src/transactions/test/ManageDataTests.cpp @@ -26,7 +26,7 @@ using namespace stellar::txtest; // add too much data TEST_CASE_VERSIONS("manage data", "[tx][managedata]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/MergeTests.cpp b/src/transactions/test/MergeTests.cpp index d462768ac0..83595e7ff1 100644 --- a/src/transactions/test/MergeTests.cpp +++ b/src/transactions/test/MergeTests.cpp @@ -34,7 +34,7 @@ using namespace stellar::txtest; // Merge when you have outstanding data entries TEST_CASE_VERSIONS("merge", "[tx][merge]") { - Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS)); + Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY)); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/OfferTests.cpp b/src/transactions/test/OfferTests.cpp index 184766e7b8..d85c7eb9a5 100644 --- a/src/transactions/test/OfferTests.cpp +++ b/src/transactions/test/OfferTests.cpp @@ -36,7 +36,7 @@ using namespace stellar::txtest; 
TEST_CASE_VERSIONS("create offer", "[tx][offers]") { - Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/PathPaymentStrictSendTests.cpp b/src/transactions/test/PathPaymentStrictSendTests.cpp index 6eb6a153a5..21fb6c48f1 100644 --- a/src/transactions/test/PathPaymentStrictSendTests.cpp +++ b/src/transactions/test/PathPaymentStrictSendTests.cpp @@ -178,7 +178,7 @@ TEST_CASE_VERSIONS("pathpayment strict send", "[tx][pathpayment]") { VirtualClock clock; auto app = createTestApplication( - clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS)); + clock, getTestConfig(0, Config::TESTDB_IN_MEMORY)); auto exchanged = [&](TestMarketOffer const& o, int64_t sold, int64_t bought) { @@ -2406,7 +2406,7 @@ TEST_CASE_VERSIONS("pathpayment strict send", "[tx][pathpayment]") TEST_CASE_VERSIONS("pathpayment strict send uses all offers in a loop", "[tx][pathpayment]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/PathPaymentTests.cpp b/src/transactions/test/PathPaymentTests.cpp index 2b74d11974..20b5d048be 100644 --- a/src/transactions/test/PathPaymentTests.cpp +++ b/src/transactions/test/PathPaymentTests.cpp @@ -70,7 +70,7 @@ assetPathToString(const std::deque& assets) TEST_CASE_VERSIONS("pathpayment", "[tx][pathpayment]") { - auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); diff --git a/src/transactions/test/PaymentTests.cpp b/src/transactions/test/PaymentTests.cpp index e53faded26..d7bbf0807b 100644 --- a/src/transactions/test/PaymentTests.cpp +++ b/src/transactions/test/PaymentTests.cpp @@ -38,7 +38,7 @@ using namespace stellar::txtest; // path payment with a transfer rate TEST_CASE_VERSIONS("payment", "[tx][payment]") { - Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS); + Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY); VirtualClock clock; auto app = createTestApplication(clock, cfg); @@ -1510,7 +1510,11 @@ TEST_CASE_VERSIONS("payment", "[tx][payment]") // Since a1 has a trustline, and there is only 1 trustline, we know // that gateway has no trustlines. 
-            REQUIRE(app->getLedgerTxnRoot().countObjects(TRUSTLINE) == 1);
+            LedgerSnapshot lsg(*app);
+            LedgerKey trustKey(TRUSTLINE);
+            trustKey.trustLine().accountID = gateway.getPublicKey();
+            trustKey.trustLine().asset = assetToTrustLineAsset(idr);
+            REQUIRE(!lsg.load(trustKey));
         });
     }
     SECTION("authorize flag")
@@ -1930,7 +1934,7 @@ TEST_CASE_VERSIONS("payment fees", "[tx][payment]")
     SECTION("fee equal to base reserve")
     {
-        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY);
         cfg.TESTING_UPGRADE_DESIRED_FEE = 100000000;
         VirtualClock clock;
@@ -2040,7 +2044,7 @@ TEST_CASE_VERSIONS("payment fees", "[tx][payment]")
     SECTION("fee bigger than base reserve")
     {
-        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY);
         cfg.TESTING_UPGRADE_DESIRED_FEE = 200000000;
         VirtualClock clock;
diff --git a/src/transactions/test/RevokeSponsorshipTests.cpp b/src/transactions/test/RevokeSponsorshipTests.cpp
index aa7c4db2fe..aee010fc1e 100644
--- a/src/transactions/test/RevokeSponsorshipTests.cpp
+++ b/src/transactions/test/RevokeSponsorshipTests.cpp
@@ -40,7 +40,7 @@ TEST_CASE_VERSIONS("update sponsorship", "[tx][sponsorship]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
     auto minBal = [&](uint32_t n) {
         return app->getLedgerManager().getLastMinBalance(n);
diff --git a/src/transactions/test/SetOptionsTests.cpp b/src/transactions/test/SetOptionsTests.cpp
index 482b55a0e9..655259e190 100644
--- a/src/transactions/test/SetOptionsTests.cpp
+++ b/src/transactions/test/SetOptionsTests.cpp
@@ -36,7 +36,7 @@ using namespace stellar::txtest;
 // minbalance
 TEST_CASE_VERSIONS("set options", "[tx][setoptions]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/SetTrustLineFlagsTests.cpp b/src/transactions/test/SetTrustLineFlagsTests.cpp
index c7de65867c..1946dcd5fb 100644
--- a/src/transactions/test/SetTrustLineFlagsTests.cpp
+++ b/src/transactions/test/SetTrustLineFlagsTests.cpp
@@ -105,7 +105,7 @@ getNumOffers(Application& app, TestAccount const& account, Asset const& asset)
 TEST_CASE_VERSIONS("set trustline flags", "[tx][settrustlineflags]")
 {
-    auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
@@ -380,7 +380,7 @@ TEST_CASE_VERSIONS("revoke from pool",
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
     // set up world
     auto root = TestAccount::createRoot(*app);
diff --git a/src/transactions/test/SorobanTxTestUtils.cpp b/src/transactions/test/SorobanTxTestUtils.cpp
index 654f2739ec..fa2adde48f 100644
--- a/src/transactions/test/SorobanTxTestUtils.cpp
+++ b/src/transactions/test/SorobanTxTestUtils.cpp
@@ -393,16 +393,16 @@ makeSorobanCreateContractTx(Application& app, TestAccount& source,
 }
 TransactionFrameBaseConstPtr
-sorobanTransactionFrameFromOps(Hash const& networkID, TestAccount& source,
-                               std::vector<Operation> const& ops,
-                               std::vector<SecretKey> const& opKeys,
-                               SorobanInvocationSpec const& spec,
-                               std::optional<std::string> memo,
-                               
std::optional<SequenceNumber> seq)
+sorobanTransactionFrameFromOps(
+    Hash const& networkID, TestAccount& source,
+    std::vector<Operation> const& ops, std::vector<SecretKey> const& opKeys,
+    SorobanInvocationSpec const& spec, std::optional<std::string> memo,
+    std::optional<SequenceNumber> seq,
+    std::optional<xdr::xvector<ArchivalProof>> proofs)
 {
     return sorobanTransactionFrameFromOps(
         networkID, source, ops, opKeys, spec.getResources(),
-        spec.getInclusionFee(), spec.getResourceFee());
+        spec.getInclusionFee(), spec.getResourceFee(), memo, seq, proofs);
 }
 SorobanInvocationSpec::SorobanInvocationSpec(SorobanResources const& resources,
@@ -675,6 +675,13 @@ TestContract::Invocation::withSpec(SorobanInvocationSpec const& spec)
     return *this;
 }
+TestContract::Invocation&
+TestContract::Invocation::withProofs(xdr::xvector<ArchivalProof> const& proofs)
+{
+    mProofs = proofs;
+    return *this;
+}
+
 SorobanInvocationSpec
 TestContract::Invocation::getSpec()
 {
@@ -691,7 +698,7 @@ TestContract::Invocation::createTx(TestAccount* source)
     auto& acc = source ? *source : mTest.getRoot();
     return sorobanTransactionFrameFromOps(mTest.getApp().getNetworkID(), acc,
-                                          {mOp}, {}, mSpec);
+                                          {mOp}, {}, mSpec, {}, {}, mProofs);
 }
 TestContract::Invocation&
@@ -702,9 +709,9 @@ TestContract::Invocation::withExactNonRefundableResourceFee()
     // enable tests that rely on the exact refundable fee value.
     // Note, that we don't use the root account here in order to not mess up
     // the sequence numbers.
-    auto dummyTx = sorobanTransactionFrameFromOps(mTest.getApp().getNetworkID(),
-                                                  mTest.getDummyAccount(),
-                                                  {mOp}, {}, mSpec);
+    auto dummyTx = sorobanTransactionFrameFromOps(
+        mTest.getApp().getNetworkID(), mTest.getDummyAccount(), {mOp}, {},
+        mSpec, {}, {}, mProofs);
     auto txSize = xdr::xdr_size(dummyTx->getEnvelope());
     auto fee = sorobanResourceFee(mTest.getApp(), mSpec.getResources(), txSize, 0);
@@ -1088,13 +1095,15 @@ SorobanTest::createExtendOpTx(SorobanResources const& resources,
 TransactionFrameBaseConstPtr
 SorobanTest::createRestoreTx(SorobanResources const& resources, uint32_t fee,
-                             int64_t refundableFee, TestAccount* source)
+                             int64_t refundableFee, TestAccount* source,
+                             std::optional<xdr::xvector<ArchivalProof>> proofs)
 {
     Operation op;
     op.body.type(RESTORE_FOOTPRINT);
     auto& acc = source ? 
*source : getRoot();
     return sorobanTransactionFrameFromOps(getApp().getNetworkID(), acc, {op},
-                                          {}, resources, fee, refundableFee);
+                                          {}, resources, fee, refundableFee, {},
+                                          {}, proofs);
 }
 bool
@@ -1151,7 +1160,8 @@ SorobanTest::isEntryLive(LedgerKey const& k, uint32_t ledgerSeq)
 void
 SorobanTest::invokeRestoreOp(xdr::xvector<LedgerKey> const& readWrite,
-                             int64_t expectedRefundableFeeCharged)
+                             int64_t expectedRefundableFeeCharged,
+                             std::optional<xdr::xvector<ArchivalProof>> proofs)
 {
     SorobanResources resources;
     resources.footprint.readWrite = readWrite;
@@ -1160,7 +1170,7 @@ SorobanTest::invokeRestoreOp(xdr::xvector<LedgerKey> const& readWrite,
     resources.writeBytes = 10'000;
     auto resourceFee = 300'000 + 40'000 * readWrite.size();
-    auto tx = createRestoreTx(resources, 1'000, resourceFee);
+    auto tx = createRestoreTx(resources, 1'000, resourceFee, nullptr, proofs);
     invokeArchivalOp(tx, expectedRefundableFeeCharged);
 }
@@ -1643,8 +1653,9 @@ ContractStorageTestClient::get(std::string const& key,
     auto invocation = mContract.prepareInvocation(funcStr, {makeSymbolSCVal(key)}, *spec);
     bool isSuccess = invocation.withExactNonRefundableResourceFee().invoke();
-    if (isSuccess && expectValue)
+    if (expectValue)
     {
+        REQUIRE(isSuccess);
         REQUIRE(*expectValue == invocation.getReturnValue().u64());
     }
     return *invocation.getResultCode();
diff --git a/src/transactions/test/SorobanTxTestUtils.h b/src/transactions/test/SorobanTxTestUtils.h
index 99211a34f2..b7456a855b 100644
--- a/src/transactions/test/SorobanTxTestUtils.h
+++ b/src/transactions/test/SorobanTxTestUtils.h
@@ -178,6 +178,7 @@ class TestContract
         std::optional<InvokeHostFunctionResultCode> mResultCode;
         int64_t mFeeCharged = 0;
         std::optional mTxMeta;
+        std::optional<xdr::xvector<ArchivalProof>> mProofs;
         bool mDeduplicateFootprint = false;
         void deduplicateFootprint();
@@ -205,6 +206,8 @@ class TestContract
         Invocation& withSpec(SorobanInvocationSpec const& spec);
+        Invocation& withProofs(xdr::xvector<ArchivalProof> const& proofs);
+
         SorobanInvocationSpec getSpec();
         TransactionFrameBaseConstPtr createTx(TestAccount* source = nullptr);
@@ -301,9 +304,10 @@ class SorobanTest
     createExtendOpTx(SorobanResources const& resources, uint32_t extendTo,
                      uint32_t fee, int64_t refundableFee,
                      TestAccount* source = nullptr);
-    TransactionFrameBaseConstPtr
-    createRestoreTx(SorobanResources const& resources, uint32_t fee,
-                    int64_t refundableFee, TestAccount* source = nullptr);
+    TransactionFrameBaseConstPtr createRestoreTx(
+        SorobanResources const& resources, uint32_t fee, int64_t refundableFee,
+        TestAccount* source = nullptr,
+        std::optional<xdr::xvector<ArchivalProof>> proofs = std::nullopt);
     bool isTxValid(TransactionFrameBaseConstPtr tx);
@@ -313,8 +317,10 @@ class SorobanTest
     uint32_t getTTL(LedgerKey const& k);
     bool isEntryLive(LedgerKey const& k, uint32_t ledgerSeq);
-    void invokeRestoreOp(xdr::xvector<LedgerKey> const& readWrite,
-                         int64_t expectedRefundableFeeCharged);
+    void invokeRestoreOp(
+        xdr::xvector<LedgerKey> const& readWrite,
+        int64_t expectedRefundableFeeCharged,
+        std::optional<xdr::xvector<ArchivalProof>> proofs = std::nullopt);
     void invokeExtendOp(
         xdr::xvector<LedgerKey> const& readOnly, uint32_t extendTo,
         std::optional<int64_t> expectedRefundableFeeCharged = std::nullopt);
diff --git a/src/transactions/test/TransactionTestFrame.cpp b/src/transactions/test/TransactionTestFrame.cpp
index 9e7278a842..2acc837431 100644
--- a/src/transactions/test/TransactionTestFrame.cpp
+++ b/src/transactions/test/TransactionTestFrame.cpp
@@ -329,6 +329,20 @@ TransactionTestFrame::sorobanResources() const
     return mTransactionFrame->sorobanResources();
 }
+#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
+bool
+TransactionTestFrame::hasSorobanProofs() const
+{
+    
return mTransactionFrame->hasSorobanProofs();
+}
+
+xdr::xvector<ArchivalProof> const&
+TransactionTestFrame::sorobanProofs() const
+{
+    return mTransactionFrame->sorobanProofs();
+}
+#endif
+
 xdr::xvector<DiagnosticEvent> const&
 TransactionTestFrame::getDiagnosticEvents() const
 {
diff --git a/src/transactions/test/TransactionTestFrame.h b/src/transactions/test/TransactionTestFrame.h
index c13b928664..7216f59e11 100644
--- a/src/transactions/test/TransactionTestFrame.h
+++ b/src/transactions/test/TransactionTestFrame.h
@@ -140,5 +140,10 @@ class TransactionTestFrame : public TransactionFrameBase
     {
         return true;
     }
+
+#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
+    bool hasSorobanProofs() const override;
+    xdr::xvector<ArchivalProof> const& sorobanProofs() const override;
+#endif
 };
 }
\ No newline at end of file
diff --git a/src/transactions/test/TxEnvelopeTests.cpp b/src/transactions/test/TxEnvelopeTests.cpp
index 99dc560533..c8ed667e6b 100644
--- a/src/transactions/test/TxEnvelopeTests.cpp
+++ b/src/transactions/test/TxEnvelopeTests.cpp
@@ -86,7 +86,7 @@ TEST_CASE("txset - correct apply order", "[tx][envelope]")
 TEST_CASE_VERSIONS("txenvelope", "[tx][envelope]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/util/ArchivalProofs.cpp b/src/util/ArchivalProofs.cpp
new file mode 100644
index 0000000000..ecfef7f000
--- /dev/null
+++ b/src/util/ArchivalProofs.cpp
@@ -0,0 +1,307 @@
+// Copyright 2024 Stellar Development Foundation and contributors. Licensed
+// under the Apache License, Version 2.0. See the COPYING file at the root
+// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
+
+#include "util/ArchivalProofs.h"
+#include "bucket/BucketListSnapshot.h"
+#include "bucket/BucketManager.h"
+#include "ledger/LedgerTypeUtils.h"
+#include "main/Application.h"
+#include "main/Config.h"
+#include "util/GlobalChecks.h"
+#include "util/UnorderedSet.h"
+#include "xdr/Stellar-ledger-entries.h"
+#include "xdr/Stellar-transaction.h"
+#include
+#include
+#include
+
+namespace stellar
+{
+
+bool
+checkCreationProofValidity(xdr::xvector<ArchivalProof> const& proofs)
+{
+    if (proofs.size() > 1)
+    {
+        return false;
+    }
+    else if (proofs.empty())
+    {
+        return true;
+    }
+
+    auto const& proof = proofs[0];
+    if (proof.body.t() != NONEXISTENCE)
+    {
+        return false;
+    }
+
+    auto numProvenKeys = proof.body.nonexistenceProof().keysToProve.size();
+    if (numProvenKeys == 0)
+    {
+        return false;
+    }
+
+    // Require unique keys
+    UnorderedSet<LedgerKey> keys(numProvenKeys);
+    for (auto const& key : proof.body.nonexistenceProof().keysToProve)
+    {
+        if (key.type() != CONTRACT_DATA ||
+            key.contractData().durability != PERSISTENT)
+        {
+            return false;
+        }
+
+        if (!keys.insert(key).second)
+        {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+bool
+isCreatedKeyProven(Application& app, LedgerKey const& lk,
+                   xdr::xvector<ArchivalProof> const& proofs)
+{
+    // Only persistent contract data entries need creation proofs
+    if (lk.type() == CONTRACT_DATA &&
+        lk.contractData().durability == PERSISTENT)
+    {
+#ifdef BUILD_TESTS
+        if (app.getConfig().ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS)
+        {
+            // Artificially require proofs for "miss" keys
+            if (lk.contractData().key.type() == SCV_SYMBOL &&
+                lk.contractData().key.sym() == "miss")
+            {
+                if (proofs.size() != 1)
+                {
+                    return false;
+                }
+
+                releaseAssert(proofs.size() == 1);
+                releaseAssert(proofs[0].body.t() == NONEXISTENCE);
+                for (auto const& key :
+                for (auto const& key :
+                     proofs[0].body.nonexistenceProof().keysToProve)
+                {
+                    if (key == lk)
+                    {
+                        return true;
+                    }
+                }
+
+                return false;
+            }
+        }
+#endif
+    }
+
+    return true;
+}
+
+bool
+addCreationProof(bool simulateBloomMiss, LedgerKey const& lk,
+                 xdr::xvector<ArchivalProof>& proofs)
+{
+#ifdef BUILD_TESTS
+    if (simulateBloomMiss)
+    {
+        // Only persistent contract data entries need creation proofs
+        if (lk.type() != CONTRACT_DATA ||
+            lk.contractData().durability != PERSISTENT)
+        {
+            return true;
+        }
+
+        for (auto& proof : proofs)
+        {
+            if (proof.body.t() == NONEXISTENCE)
+            {
+                for (auto const& key :
+                     proof.body.nonexistenceProof().keysToProve)
+                {
+                    if (key == lk)
+                    {
+                        // Proof already exists
+                        return true;
+                    }
+                }
+
+                proof.body.nonexistenceProof().keysToProve.push_back(lk);
+                return true;
+            }
+        }
+
+        proofs.emplace_back();
+        auto& nonexistenceProof = proofs.back();
+        nonexistenceProof.body.t(NONEXISTENCE);
+        nonexistenceProof.body.nonexistenceProof().keysToProve.push_back(lk);
+    }
+#endif
+
+    return true;
+}
+
+bool
+checkRestorationProofValidity(xdr::xvector<ArchivalProof> const& proofs)
+{
+    if (proofs.size() > 1)
+    {
+        return false;
+    }
+    else if (proofs.empty())
+    {
+        return true;
+    }
+
+    auto const& proof = proofs[0];
+    if (proof.body.t() != EXISTENCE)
+    {
+        return false;
+    }
+
+    auto numProvenEntries = proof.body.existenceProof().entriesToProve.size();
+    if (numProvenEntries == 0)
+    {
+        return false;
+    }
+
+    // Require unique keys
+    UnorderedSet<LedgerKey> keys(numProvenEntries);
+    for (auto const& be : proof.body.existenceProof().entriesToProve)
+    {
+        // All entries proven must be Archived BucketEntries
+        if (be.type() != COLD_ARCHIVE_ARCHIVED_LEAF)
+        {
+            return false;
+        }
+
+        auto key = LedgerEntryKey(be.archivedLeaf().archivedEntry);
+        if (!isPersistentEntry(key))
+        {
+            return false;
+        }
+
+        if (!keys.insert(key).second)
+        {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+std::shared_ptr<LedgerEntry>
+getRestoredEntryFromProof(Application& app, LedgerKey const& lk,
+                          xdr::xvector<ArchivalProof> const& proofs)
+{
+    releaseAssertOrThrow(isPersistentEntry(lk));
+    auto hotArchive =
+        app.getBucketManager().getSearchableHotArchiveBucketListSnapshot();
+    auto entry = hotArchive->load(lk);
+#ifdef BUILD_TESTS
+    if (app.getConfig().REQUIRE_PROOFS_FOR_ALL_EVICTED_ENTRIES)
+    {
+        if (proofs.size() != 1)
+        {
+            return nullptr;
+        }
+
+        // Should have been checked already by checkRestorationProofValidity
+        releaseAssertOrThrow(proofs[0].body.t() == EXISTENCE);
+        for (auto const& be : proofs[0].body.existenceProof().entriesToProve)
+        {
+            if (LedgerEntryKey(be.archivedLeaf().archivedEntry) == lk)
+            {
+                // Check the discriminant before touching archivedEntry() so
+                // live or deleted hot archive entries don't throw.
+                if (entry &&
+                    entry->type() ==
+                        HotArchiveBucketEntryType::HOT_ARCHIVE_ARCHIVED &&
+                    entry->archivedEntry() == be.archivedLeaf().archivedEntry)
+                {
+                    return std::make_shared<LedgerEntry>(
+                        entry->archivedEntry());
+                }
+
+                return nullptr;
+            }
+        }
+
+        return nullptr;
+    }
+#endif
+
+    if (entry &&
+        entry->type() == HotArchiveBucketEntryType::HOT_ARCHIVE_ARCHIVED)
+    {
+        return std::make_shared<LedgerEntry>(entry->archivedEntry());
+    }
+
+    return nullptr;
+}
+
+bool
+addRestorationProof(
+    std::shared_ptr<SearchableHotArchiveBucketListSnapshot> hotArchive,
+    LedgerKey const& lk, xdr::xvector<ArchivalProof>& proofs,
+    std::optional<uint32_t> ledgerSeq)
+{
+#ifdef BUILD_TESTS
+    // For now only support proof generation for testing
+    releaseAssertOrThrow(isPersistentEntry(lk));
+
+    std::shared_ptr<HotArchiveBucketEntry> entry = nullptr;
+    if (ledgerSeq)
+    {
+        auto entryOp = hotArchive->loadKeysFromLedger({lk}, *ledgerSeq);
+        if (!entryOp)
+        {
+            throw std::invalid_argument("LedgerSeq not found");
+        }
+
+        entry =
+            std::make_shared<HotArchiveBucketEntry>(std::move(entryOp->at(0)));
+    }
+    else
+    {
+        entry = hotArchive->load(lk);
+    }
+
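+    // Only entries that were actually archived can back a restoration proof;
+    // HOT_ARCHIVE_LIVE and HOT_ARCHIVE_DELETED entries have nothing to prove.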
+    if (!entry ||
+        entry->type() != HotArchiveBucketEntryType::HOT_ARCHIVE_ARCHIVED)
+    {
+        return false;
+    }
+
+    ColdArchiveBucketEntry be;
+    be.type(COLD_ARCHIVE_ARCHIVED_LEAF);
+    be.archivedLeaf().archivedEntry = entry->archivedEntry();
+
+    for (auto& proof : proofs)
+    {
+        if (proof.body.t() == EXISTENCE)
+        {
+            // Don't shadow `be` above; scan the existing proof entries under
+            // a distinct name.
+            for (auto const& existing :
+                 proof.body.existenceProof().entriesToProve)
+            {
+                if (LedgerEntryKey(existing.archivedLeaf().archivedEntry) ==
+                    lk)
+                {
+                    // Proof already exists
+                    return true;
+                }
+            }
+
+            proof.body.existenceProof().entriesToProve.push_back(be);
+            return true;
+        }
+    }
+
+    proofs.emplace_back();
+    auto& existenceProof = proofs.back();
+    existenceProof.body.t(EXISTENCE);
+    existenceProof.body.existenceProof().entriesToProve.push_back(be);
+#endif
+
+    return true;
+}
+}
\ No newline at end of file
diff --git a/src/util/ArchivalProofs.h b/src/util/ArchivalProofs.h
new file mode 100644
index 0000000000..1f021e9b45
--- /dev/null
+++ b/src/util/ArchivalProofs.h
@@ -0,0 +1,60 @@
+#pragma once
+
+// Copyright 2024 Stellar Development Foundation and contributors. Licensed
+// under the Apache License, Version 2.0. See the COPYING file at the root
+// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
+
+#include "xdr/Stellar-transaction.h"
+#include <memory>
+
+#include <optional>
+
+namespace stellar
+{
+
+class Application;
+class SearchableHotArchiveBucketListSnapshot;
+
+// Returns true if the proof structure is valid. This function does not check
+// the validity of the proof itself, just that its structure is correct (e.g.
+// no duplicate keys being proven).
+bool checkCreationProofValidity(xdr::xvector<ArchivalProof> const& proofs);
+
+// Returns true if the new entry being created has a valid proof, or if no
+// proof is required (i.e. there was no archival filter miss, or the key type
+// does not require proofs).
+// Warning: assumes proofs has a valid structure according to
+// checkCreationProofValidity.
+bool isCreatedKeyProven(Application& app, LedgerKey const& lk,
+                        xdr::xvector<ArchivalProof> const& proofs);
+
+// Adds a creation proof for lk to proofs. Returns true if a proof was added or
+// is not necessary. Returns false if no valid proof exists.
+bool addCreationProof(bool simulateBloomMiss, LedgerKey const& lk,
+                      xdr::xvector<ArchivalProof>& proofs);
+
+// Returns true if the proof structure is valid. This function does not check
+// the validity of the proof itself, just that its structure is correct (e.g.
+// no duplicate keys being proven).
+bool checkRestorationProofValidity(xdr::xvector<ArchivalProof> const& proofs);
+
+// Returns the LedgerEntry for the given key being restored if a valid proof is
+// provided or if no proof is required (i.e. the entry exists in the
+// HotArchive). If the proof for the given key is not valid, returns nullptr.
+// This function will check the HotArchive, but will not search the live
+// BucketList. It is the responsibility of the caller to search the live
+// BucketList before calling this function. Warning: assumes proofs has a valid
+// structure according to checkRestorationProofValidity.
+std::shared_ptr<LedgerEntry>
+getRestoredEntryFromProof(Application& app, LedgerKey const& lk,
+                          xdr::xvector<ArchivalProof> const& proofs);
+
+// Adds a restoration proof for lk to proofs. Returns true if a proof was added
+// or is not necessary. Returns false if no valid proof exists.
+// If ledgerSeq is specified, the entry will be restored based on the snapshot
+// at that ledger. TODO: Fix race condition when ledgerSeq is populated by
+// loading keys in batches.
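+//
+// A minimal usage sketch (hypothetical names; assumes a SorobanTest-style
+// test fixture `test` and an Application `app`, as in the test utilities in
+// this change):
+//
+//     xdr::xvector<ArchivalProof> proofs;
+//     auto snap = app.getBucketManager()
+//                     .getSearchableHotArchiveBucketListSnapshot();
+//     if (addRestorationProof(snap, evictedKey, proofs))
+//     {
+//         // checkRestorationProofValidity(proofs) should now hold, and the
+//         // proofs can be passed along with a RestoreFootprintOp, e.g.:
+//         test.invokeRestoreOp({evictedKey}, expectedRefundableFee, proofs);
+//     }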
+bool addRestorationProof(
+    std::shared_ptr<SearchableHotArchiveBucketListSnapshot> hotArchive,
+    LedgerKey const& lk, xdr::xvector<ArchivalProof>& proofs,
+    std::optional<uint32_t> ledgerSeq = std::nullopt);
+}
\ No newline at end of file
diff --git a/src/util/ProtocolVersion.h b/src/util/ProtocolVersion.h
index b908b8f4a9..32341840c9 100644
--- a/src/util/ProtocolVersion.h
+++ b/src/util/ProtocolVersion.h
@@ -34,7 +34,8 @@ enum class ProtocolVersion : uint32_t
     V_19,
     V_20,
     V_21,
-    V_22
+    V_22,
+    V_23
 };
 
 // Checks whether provided protocolVersion is before (i.e. strictly lower than)
diff --git a/src/util/test/ArchivalProofsTests.cpp b/src/util/test/ArchivalProofsTests.cpp
new file mode 100644
index 0000000000..8405eec878
--- /dev/null
+++ b/src/util/test/ArchivalProofsTests.cpp
@@ -0,0 +1,244 @@
+// Copyright 2024 Stellar Development Foundation and contributors. Licensed
+// under the Apache License, Version 2.0. See the COPYING file at the root
+// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
+
+#include "ledger/test/LedgerTestUtils.h"
+#include "lib/catch.hpp"
+#include "test/TestUtils.h"
+#include "test/test.h"
+#include "util/ArchivalProofs.h"
+#include "xdr/Stellar-contract.h"
+#include "xdr/Stellar-ledger-entries.h"
+#include "xdr/Stellar-transaction.h"
+
+using namespace stellar;
+
+TEST_CASE("creation proofs", "[archival][soroban]")
+{
+    VirtualClock clock;
+    auto cfg = getTestConfig();
+    cfg.ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS = true;
+    auto app = createTestApplication(clock, cfg);
+
+    SECTION("roundtrip")
+    {
+        xdr::xvector<ArchivalProof> proofs;
+        auto entries =
+            LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
+                {CONTRACT_DATA}, 2);
+        bool isAccount = true;
+        for (auto& e : entries)
+        {
+            e.data.contractData().durability = PERSISTENT;
+            e.data.contractData().key.type(SCV_SYMBOL);
+
+            // Both contract data keys use the symbol "miss"; the two
+            // LedgerKeys stay unique because the contract address types
+            // differ.
+            e.data.contractData().key.sym() = "miss";
+            if (isAccount)
+            {
+                e.data.contractData().contract.type(
+                    SCAddressType::SC_ADDRESS_TYPE_ACCOUNT);
+                isAccount = false;
+            }
+            else
+            {
+                e.data.contractData().contract.type(
+                    SCAddressType::SC_ADDRESS_TYPE_CONTRACT);
+            }
+        }
+
+        // An empty proof vector is structurally valid, but proves nothing
+        REQUIRE(checkCreationProofValidity(proofs));
+        REQUIRE(!isCreatedKeyProven(*app, LedgerEntryKey(entries[0]), proofs));
+        REQUIRE(addCreationProof(
+            app->getConfig().ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS,
+            LedgerEntryKey(entries[0]), proofs));
+        REQUIRE(checkCreationProofValidity(proofs));
+        REQUIRE(isCreatedKeyProven(*app, LedgerEntryKey(entries[0]), proofs));
+
+        REQUIRE(proofs.size() == 1);
+        REQUIRE(proofs.back().body.t() == NONEXISTENCE);
+        REQUIRE(proofs.back().body.nonexistenceProof().keysToProve.size() == 1);
+
+        // Proof with wrong key fails
+        REQUIRE(!isCreatedKeyProven(*app, LedgerEntryKey(entries[1]), proofs));
+
+        // Proofs work for multiple keys
+        REQUIRE(addCreationProof(
+            app->getConfig().ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS,
+            LedgerEntryKey(entries[1]), proofs));
+        REQUIRE(checkCreationProofValidity(proofs));
+        REQUIRE(isCreatedKeyProven(*app, LedgerEntryKey(entries[1]), proofs));
+        REQUIRE(isCreatedKeyProven(*app, LedgerEntryKey(entries[0]), proofs));
+
+        REQUIRE(proofs.size() == 1);
+        REQUIRE(proofs.back().body.t() == NONEXISTENCE);
+        REQUIRE(proofs.back().body.nonexistenceProof().keysToProve.size() == 2);
+    }
+
+    SECTION("temp and code do not require creation proofs")
+    {
+        xdr::xvector<ArchivalProof> proofs;
+        auto temp =
+            LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_DATA);
+        temp.data.contractData().durability = TEMPORARY;
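+        // Neither TEMPORARY contract data nor contract code passes through
+        // the archival filter, so no creation proof should ever be required.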
+        auto code =
+            LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_CODE);
+
+        REQUIRE(checkCreationProofValidity(proofs));
+        REQUIRE(isCreatedKeyProven(*app, LedgerEntryKey(temp), proofs));
+        REQUIRE(addCreationProof(
+            app->getConfig().ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS,
+            LedgerEntryKey(temp), proofs));
+        REQUIRE(checkCreationProofValidity(proofs));
+        REQUIRE(proofs.size() == 0);
+
+        REQUIRE(isCreatedKeyProven(*app, LedgerEntryKey(code), proofs));
+        REQUIRE(addCreationProof(
+            app->getConfig().ARTIFICIALLY_SIMULATE_ARCHIVE_FILTER_MISS,
+            LedgerEntryKey(code), proofs));
+        REQUIRE(checkCreationProofValidity(proofs));
+        REQUIRE(proofs.size() == 0);
+    }
+}
+
+TEST_CASE("creation proof structure validity", "[archival][soroban]")
+{
+    xdr::xvector<ArchivalProof> proofs;
+    auto temp = LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_DATA);
+    temp.data.contractData().durability = TEMPORARY;
+    auto code = LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_CODE);
+    auto classic = LedgerTestUtils::generateValidLedgerEntryOfType(TRUSTLINE);
+
+    // An empty proof vector is valid
+    REQUIRE(checkCreationProofValidity(proofs));
+
+    SECTION("bad proof type")
+    {
+        proofs.emplace_back();
+        proofs.back().body.t(EXISTENCE);
+        REQUIRE(!checkCreationProofValidity(proofs));
+    }
+
+    SECTION("too many proof structures")
+    {
+        proofs.emplace_back();
+        proofs.back().body.t(NONEXISTENCE);
+        proofs.emplace_back();
+        proofs.back().body.t(NONEXISTENCE);
+        REQUIRE(!checkCreationProofValidity(proofs));
+    }
+
+    SECTION("invalid keys")
+    {
+        proofs.emplace_back();
+        proofs.back().body.t(NONEXISTENCE);
+
+        SECTION("classic")
+        {
+            proofs.back().body.nonexistenceProof().keysToProve.emplace_back(
+                LedgerEntryKey(classic));
+            REQUIRE(!checkCreationProofValidity(proofs));
+        }
+
+        SECTION("temp")
+        {
+            proofs.back().body.nonexistenceProof().keysToProve.emplace_back(
+                LedgerEntryKey(temp));
+            REQUIRE(!checkCreationProofValidity(proofs));
+        }
+
+        SECTION("code")
+        {
+            proofs.back().body.nonexistenceProof().keysToProve.emplace_back(
+                LedgerEntryKey(code));
+            REQUIRE(!checkCreationProofValidity(proofs));
+        }
+    }
+
+    SECTION("duplicate keys")
+    {
+        proofs.emplace_back();
+        proofs.back().body.t(NONEXISTENCE);
+        auto entry =
+            LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_DATA);
+        entry.data.contractData().durability = PERSISTENT;
+        proofs.back().body.nonexistenceProof().keysToProve.emplace_back(
+            LedgerEntryKey(entry));
+        REQUIRE(checkCreationProofValidity(proofs));
+        proofs.back().body.nonexistenceProof().keysToProve.emplace_back(
+            LedgerEntryKey(entry));
+        REQUIRE(!checkCreationProofValidity(proofs));
+    }
+}
+
+TEST_CASE("restoration proof structure validity", "[archival][soroban]")
+{
+    xdr::xvector<ArchivalProof> proofs;
+    auto temp = LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_DATA);
+    temp.data.contractData().durability = TEMPORARY;
+    auto classic = LedgerTestUtils::generateValidLedgerEntryOfType(TRUSTLINE);
+    auto be = ColdArchiveBucketEntry();
+    be.type(COLD_ARCHIVE_ARCHIVED_LEAF);
+
+    // An empty proof vector is valid
+    REQUIRE(checkRestorationProofValidity(proofs));
+
+    SECTION("bad proof type")
+    {
+        proofs.emplace_back();
+        proofs.back().body.t(NONEXISTENCE);
+        REQUIRE(!checkRestorationProofValidity(proofs));
+    }
+
+    SECTION("too many proof structures")
+    {
+        proofs.emplace_back();
+        proofs.back().body.t(EXISTENCE);
+        proofs.emplace_back();
+        proofs.back().body.t(EXISTENCE);
+        REQUIRE(!checkRestorationProofValidity(proofs));
+    }
+
+    SECTION("invalid keys")
+    {
+        proofs.emplace_back();
+        proofs.back().body.t(EXISTENCE);
+
+        SECTION("classic")
+        {
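+            // Classic entries such as trustlines are never archived, so they
+            // are invalid in a restoration proof.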
+            be.archivedLeaf().archivedEntry = classic;
+            proofs.back().body.existenceProof().entriesToProve.emplace_back(be);
+            REQUIRE(!checkRestorationProofValidity(proofs));
+        }
+
+        SECTION("temp")
+        {
+            be.archivedLeaf().archivedEntry = temp;
+            proofs.back().body.existenceProof().entriesToProve.emplace_back(be);
+            REQUIRE(!checkRestorationProofValidity(proofs));
+        }
+
+        SECTION("bad BucketEntry type")
+        {
+            be.type(COLD_ARCHIVE_DELETED_LEAF);
+            proofs.back().body.existenceProof().entriesToProve.emplace_back(be);
+            REQUIRE(!checkRestorationProofValidity(proofs));
+        }
+    }
+
+    SECTION("duplicate keys")
+    {
+        proofs.emplace_back();
+        proofs.back().body.t(EXISTENCE);
+        auto entry =
+            LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_DATA);
+        entry.data.contractData().durability = PERSISTENT;
+        be.archivedLeaf().archivedEntry = entry;
+        proofs.back().body.existenceProof().entriesToProve.emplace_back(be);
+        REQUIRE(checkRestorationProofValidity(proofs));
+        proofs.back().body.existenceProof().entriesToProve.emplace_back(be);
+        REQUIRE(!checkRestorationProofValidity(proofs));
+    }
+}
\ No newline at end of file
diff --git a/src/util/test/XDRStreamTests.cpp b/src/util/test/XDRStreamTests.cpp
index 7710562c91..16754b5a1b 100644
--- a/src/util/test/XDRStreamTests.cpp
+++ b/src/util/test/XDRStreamTests.cpp
@@ -33,7 +33,7 @@ TEST_CASE("XDROutputFileStream fail modes", "[xdrstream]")
     size_t bytes = 0;
     auto ledgerEntries = LedgerTestUtils::generateValidLedgerEntries(1);
     auto bucketEntries =
-        Bucket::convertToBucketEntry(false, {}, ledgerEntries, {});
+        LiveBucket::convertToBucketEntry(false, {}, ledgerEntries, {});
 
     REQUIRE_THROWS_AS(out.writeOne(bucketEntries[0], &hasher, &bytes),
                       std::runtime_error);
@@ -53,7 +53,7 @@ TEST_CASE("XDROutputFileStream fsync bench", "[!hide][xdrstream][bench]")
     SHA256 hasher;
     auto ledgerEntries = LedgerTestUtils::generateValidLedgerEntries(10000000);
     auto bucketEntries =
-        Bucket::convertToBucketEntry(false, {}, ledgerEntries, {});
+        LiveBucket::convertToBucketEntry(false, {}, ledgerEntries, {});
 
     fs::mkpath(cfg.BUCKET_DIR_PATH);
 
diff --git a/src/util/types.h b/src/util/types.h
index 0a893c5a1e..e646a5c35a 100644
--- a/src/util/types.h
+++ b/src/util/types.h
@@ -128,6 +128,22 @@ assetToString(const Asset& asset)
     return r;
 };
 
+inline LedgerKey
+getBucketLedgerKey(HotArchiveBucketEntry const& be)
+{
+    switch (be.type())
+    {
+    case HOT_ARCHIVE_LIVE:
+    case HOT_ARCHIVE_DELETED:
+        return be.key();
+    case HOT_ARCHIVE_ARCHIVED:
+        return LedgerEntryKey(be.archivedEntry());
+    case HOT_ARCHIVE_METAENTRY:
+    default:
+        throw std::invalid_argument(
+            "Tried to get key for METAENTRY or invalid HotArchiveBucketEntry");
+    }
+}
+
 inline LedgerKey
 getBucketLedgerKey(BucketEntry const& be)
 {