From 59586a588517b0a37bd90a0e664cc77acef4b73e Mon Sep 17 00:00:00 2001
From: yihuang
Date: Tue, 2 Apr 2024 11:22:55 +0800
Subject: [PATCH] Problem: custom cache store impl not unified with sdk (#446)

* Problem: custom cache store impl not unified with sdk

Solution:
- unify the refactored cache store with sdk

Update CHANGELOG.md

Signed-off-by: yihuang

mount object stores

fix

fix test

cleanup

fix ctx

fix lint

* update deps
---
 CHANGELOG.md                            |   1 +
 app/app.go                              |  16 +-
 go.mod                                  |   6 +-
 go.sum                                  |   8 +-
 gomod2nix.toml                          |  10 +-
 store/cachekv/README.md                 | 140 -----
 store/cachekv/bench_helper_test.go      |  44 --
 store/cachekv/benchmark_test.go         | 133 -----
 store/cachekv/internal/btree.go         | 121 ----
 store/cachekv/internal/btree_test.go    | 208 -------
 store/cachekv/internal/memiterator.go   | 123 -----
 store/cachekv/internal/mergeiterator.go | 235 --------
 store/cachekv/store.go                  | 179 ------
 store/cachekv/store_bench_test.go       | 149 -----
 store/cachekv/store_test.go             | 707 ------------------
 store/cachemulti/store.go               | 191 -------
 store/cachemulti/store_test.go          |  25 -
 x/evm/keeper/keeper.go                  |  10 -
 x/evm/statedb/interfaces.go             |   3 -
 x/evm/statedb/native.go                 |   6 +-
 x/evm/statedb/statedb.go                |  83 +--
 x/evm/statedb/statedb_test.go           |  16 +-
 22 files changed, 79 insertions(+), 2335 deletions(-)
 delete mode 100644 store/cachekv/README.md
 delete mode 100644 store/cachekv/bench_helper_test.go
 delete mode 100644 store/cachekv/benchmark_test.go
 delete mode 100644 store/cachekv/internal/btree.go
 delete mode 100644 store/cachekv/internal/btree_test.go
 delete mode 100644 store/cachekv/internal/memiterator.go
 delete mode 100644 store/cachekv/internal/mergeiterator.go
 delete mode 100644 store/cachekv/store.go
 delete mode 100644 store/cachekv/store_bench_test.go
 delete mode 100644 store/cachekv/store_test.go
 delete mode 100644 store/cachemulti/store.go
 delete mode 100644 store/cachemulti/store_test.go

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7594b839a6..7bb229a256 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -43,6 +43,7 @@ Ref: https://keepachangelog.com/en/1.0.0/
 - (feemarket) [#433](https://github.com/crypto-org-chain/ethermint/pull/433) Fix sdk int conversion panic with baseFee.
 * (rpc) [#434](https://github.com/crypto-org-chain/ethermint/pull/434) No need gasPrice when patch gasUsed for `eth_getTransactionReceipt`.
 * (rpc) [#439](https://github.com/crypto-org-chain/ethermint/pull/439), [#441](https://github.com/crypto-org-chain/ethermint/pull/441) Align trace response for failed tx with go-ethereum.
+* (statedb) [#446](https://github.com/crypto-org-chain/ethermint/pull/446) Re-use the cache store implementation with sdk.
### State Machine Breaking diff --git a/app/app.go b/app/app.go index da5747bc57..58afb930fa 100644 --- a/app/app.go +++ b/app/app.go @@ -233,6 +233,7 @@ type EthermintApp struct { keys map[string]*storetypes.KVStoreKey tkeys map[string]*storetypes.TransientStoreKey memKeys map[string]*storetypes.MemoryStoreKey + okeys map[string]*storetypes.ObjectStoreKey // keepers AccountKeeper authkeeper.AccountKeeper @@ -330,6 +331,7 @@ func NewEthermintApp( // Add the EVM transient store key tkeys := storetypes.NewTransientStoreKeys(paramstypes.TStoreKey, evmtypes.TransientKey, feemarkettypes.TransientKey) memKeys := storetypes.NewMemoryStoreKeys(capabilitytypes.MemStoreKey) + okeys := storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey) // load state streaming if enabled if err := bApp.RegisterStreamingServices(appOpts, keys); err != nil { @@ -347,6 +349,7 @@ func NewEthermintApp( keys: keys, tkeys: tkeys, memKeys: memKeys, + okeys: okeys, } // init params keeper and subspaces @@ -386,6 +389,7 @@ func NewEthermintApp( app.BankKeeper = bankkeeper.NewBaseKeeper( appCodec, runtime.NewKVStoreService(keys[banktypes.StoreKey]), + okeys[banktypes.ObjectStoreKey], app.AccountKeeper, app.BlockedAddrs(), authAddr, @@ -496,16 +500,6 @@ func NewEthermintApp( // Set authority to x/gov module account to only expect the module account to update params evmSs := app.GetSubspace(evmtypes.ModuleName) - allKeys := make(map[string]storetypes.StoreKey, len(keys)+len(tkeys)+len(memKeys)) - for k, v := range keys { - allKeys[k] = v - } - for k, v := range tkeys { - allKeys[k] = v - } - for k, v := range memKeys { - allKeys[k] = v - } app.EvmKeeper = evmkeeper.NewKeeper( appCodec, runtime.NewKVStoreService(keys[evmtypes.StoreKey]), @@ -513,7 +507,6 @@ func NewEthermintApp( app.AccountKeeper, app.BankKeeper, app.StakingKeeper, app.FeeMarketKeeper, tracer, nil, - allKeys, ) // register the proposal types @@ -769,6 +762,7 @@ func NewEthermintApp( app.MountKVStores(keys) app.MountTransientStores(tkeys) app.MountMemoryStores(memKeys) + app.MountObjectStores(okeys) // initialize BaseApp app.SetInitChainer(app.InitChainer) diff --git a/go.mod b/go.mod index 2e08090299..ced6559479 100644 --- a/go.mod +++ b/go.mod @@ -46,7 +46,6 @@ require ( github.com/spf13/viper v1.18.2 github.com/status-im/keycard-go v0.2.0 github.com/stretchr/testify v1.9.0 - github.com/tidwall/btree v1.7.0 github.com/tidwall/gjson v1.14.4 github.com/tidwall/sjson v1.2.5 github.com/tyler-smith/go-bip39 v1.1.0 @@ -205,6 +204,7 @@ require ( github.com/subosito/gotenv v1.6.0 // indirect github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tidwall/btree v1.7.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.0 // indirect github.com/tklauser/go-sysconf v0.3.10 // indirect @@ -241,9 +241,13 @@ require ( ) replace ( + // release/v0.50.x + cosmossdk.io/store => github.com/crypto-org-chain/cosmos-sdk/store v0.0.0-20240402015425-ec314e8e2d07 // use cosmos keyring github.com/99designs/keyring => github.com/cosmos/keyring v1.2.0 github.com/cockroachdb/pebble => github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 + // release/v0.50.x + github.com/cosmos/cosmos-sdk => github.com/crypto-org-chain/cosmos-sdk v0.46.0-beta2.0.20240402015425-ec314e8e2d07 github.com/ethereum/go-ethereum => github.com/crypto-org-chain/go-ethereum v1.10.20-0.20231207063621-43cf32d91c3e // Fix upstream GHSA-h395-qcrw-5vmq vulnerability. 
// TODO Remove it: https://github.com/cosmos/cosmos-sdk/issues/10409 diff --git a/go.sum b/go.sum index b2c472bda2..7291af0693 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,6 @@ cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= -cosmossdk.io/store v1.0.2 h1:lSg5BTvJBHUDwswNNyeh4K/CbqiHER73VU4nDNb8uk0= -cosmossdk.io/store v1.0.2/go.mod h1:EFtENTqVTuWwitGW1VwaBct+yDagk7oG/axBMPH+FXs= cosmossdk.io/tools/confix v0.1.1 h1:aexyRv9+y15veH3Qw16lxQwo+ki7r2I+g0yNTEFEQM8= cosmossdk.io/tools/confix v0.1.1/go.mod h1:nQVvP1tHsGXS83PonPVWJtSbddIqyjEw99L4M3rPJyQ= cosmossdk.io/x/circuit v0.1.0 h1:IAej8aRYeuOMritczqTlljbUVHq1E85CpBqaCTwYgXs= @@ -381,8 +379,6 @@ github.com/cosmos/cosmos-db v1.0.2 h1:hwMjozuY1OlJs/uh6vddqnk9j7VamLv+0DBlbEXbAK github.com/cosmos/cosmos-db v1.0.2/go.mod h1:Z8IXcFJ9PqKK6BIsVOB3QXtkKoqUOp1vRvPT39kOXEA= github.com/cosmos/cosmos-proto v1.0.0-beta.4 h1:aEL7tU/rLOmxZQ9z4i7mzxcLbSCY48OdY7lIWTLG7oU= github.com/cosmos/cosmos-proto v1.0.0-beta.4/go.mod h1:oeB+FyVzG3XrQJbJng0EnV8Vljfk9XvTIpGILNU/9Co= -github.com/cosmos/cosmos-sdk v0.50.5-0.20240318121607-9a59234c4202 h1:rQbmfBwvL4PUdRbyor+Ro3WrX1MxjgOIexf5fct6iIQ= -github.com/cosmos/cosmos-sdk v0.50.5-0.20240318121607-9a59234c4202/go.mod h1:VAp+d9UcrbMZyZPetVZBOTQj/lNAOrcD2ADGoFCChCA= github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= @@ -417,6 +413,10 @@ github.com/creachadair/tomledit v0.0.24 h1:5Xjr25R2esu1rKCbQEmjZYlrhFkDspoAbAKb6 github.com/creachadair/tomledit v0.0.24/go.mod h1:9qHbShRWQzSCcn617cMzg4eab1vbLCOjOshAWSzWr8U= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crypto-org-chain/cosmos-sdk v0.46.0-beta2.0.20240402015425-ec314e8e2d07 h1:XVb9zfI9joZzroBIBiiopd1Un4hWf0Wq2/iLfWTpSY8= +github.com/crypto-org-chain/cosmos-sdk v0.46.0-beta2.0.20240402015425-ec314e8e2d07/go.mod h1:nRk8EA8/fEG4zSme2i/Rq5z3k7TrlsHkOYhrY79hhD8= +github.com/crypto-org-chain/cosmos-sdk/store v0.0.0-20240402015425-ec314e8e2d07 h1:LlaT9o3Fly1MGX4MRGa/UcwAQzl7tewjryyJ2xTV9jg= +github.com/crypto-org-chain/cosmos-sdk/store v0.0.0-20240402015425-ec314e8e2d07/go.mod h1:lfuLI1f4o+0SGtlHQS4x5qsjRcZZfYqG8bp3k8hM0M8= github.com/crypto-org-chain/go-ethereum v1.10.20-0.20231207063621-43cf32d91c3e h1:vnyepPQ/m25+19xcTuBUdRxmltZ/EjVWNqEjhg7Ummk= github.com/crypto-org-chain/go-ethereum v1.10.20-0.20231207063621-43cf32d91c3e/go.mod h1:+a8pUj1tOyJ2RinsNQD4326YS+leSoKGiG/uVVb0x6Y= github.com/danieljoos/wincred v1.2.0 h1:ozqKHaLK0W/ii4KVbbvluM91W2H3Sh0BncbUNPS7jLE= diff --git a/gomod2nix.toml b/gomod2nix.toml index 4200d9a702..12956d9fa9 100644 --- a/gomod2nix.toml +++ b/gomod2nix.toml @@ -41,8 +41,9 @@ schema = 3 version = "v1.3.0" hash = "sha256-EEFK43Cr0g0ndhQhkIKher0FqV3mvkmE9z0sP7uVSHg=" [mod."cosmossdk.io/store"] - version = "v1.0.2" - hash = "sha256-mEaBNfU892M3V6qTMEDXb1GLaywlyouTRC5XfVqNSMs=" + version = "v0.0.0-20240402015425-ec314e8e2d07" + hash = "sha256-0yj6ToF5tiOsJ7B3sBS9lLLEDVrKJxi1tNlqRcWauDw=" + replaced = "github.com/crypto-org-chain/cosmos-sdk/store" 
[mod."cosmossdk.io/tools/confix"] version = "v0.1.1" hash = "sha256-/Et2FFhb4XfakbLFvGQK3QxN5Y7alzO+DGfi2/EWbxo=" @@ -160,8 +161,9 @@ schema = 3 version = "v1.0.0-beta.4" hash = "sha256-5Kn82nsZfiEtuwhhLZqmMxdAY1tX/Fi3HJ0/MEaRohw=" [mod."github.com/cosmos/cosmos-sdk"] - version = "v0.50.5-0.20240318121607-9a59234c4202" - hash = "sha256-JKBGS1Tcq95mp7UVGIBusf96z6ksZyyDCCyOSacpcFA=" + version = "v0.46.0-beta2.0.20240402015425-ec314e8e2d07" + hash = "sha256-kp8xXSle0X8PW4uIHXmfkGHjj3y6KTpcbvW+oJHsLzI=" + replaced = "github.com/crypto-org-chain/cosmos-sdk" [mod."github.com/cosmos/go-bip39"] version = "v1.0.0" hash = "sha256-Qm2aC2vaS8tjtMUbHmlBSagOSqbduEEDwc51qvQaBmA=" diff --git a/store/cachekv/README.md b/store/cachekv/README.md deleted file mode 100644 index aa0f442508..0000000000 --- a/store/cachekv/README.md +++ /dev/null @@ -1,140 +0,0 @@ -# CacheKVStore specification - -A `CacheKVStore` is cache wrapper for a `KVStore`. It extends the operations of the `KVStore` to work with a write-back cache, allowing for reduced I/O operations and more efficient disposing of changes (e.g. after processing a failed transaction). - -The core goals the CacheKVStore seeks to solve are: - -* Buffer all writes to the parent store, so they can be dropped if they need to be reverted -* Allow iteration over contiguous spans of keys -* Act as a cache, improving access time for reads that have already been done (by replacing tree access with hashtable access, avoiding disk I/O) - * Note: We actually fail to achieve this for iteration right now - * Note: Need to consider this getting too large and dropping some cached reads -* Make subsequent reads account for prior buffered writes -* Write all buffered changes to the parent store - -We should revisit these goals with time (for instance it's unclear that all disk writes need to be buffered to the end of the block), but this is the current status. - -## Types and Structs - -```go -type Store struct { - mtx sync.Mutex - cache map[string]*cValue - deleted map[string]struct{} - unsortedCache map[string]struct{} - sortedCache *dbm.MemDB // always ascending sorted - parent types.KVStore -} -``` - -The Store struct wraps the underlying `KVStore` (`parent`) with additional data structures for implementing the cache. Mutex is used as IAVL trees (the `KVStore` in application) are not safe for concurrent use. - -### `cache` - -The main mapping of key-value pairs stored in cache. This map contains both keys that are cached from read operations as well as ‘dirty’ keys which map to a value that is potentially different than what is in the underlying `KVStore`. - -Values that are mapped to in `cache` are wrapped in a `cValue` struct, which contains the value and a boolean flag (`dirty`) representing whether the value has been written since the last write-back to `parent`. - -```go -type cValue struct { - value []byte - dirty bool -} -``` - -### `deleted` - -Key-value pairs that are to be deleted from `parent` are stored in the `deleted` map. Keys are mapped to an empty struct to implement a set. - -### `unsortedCache` - -Similar to `deleted`, this is a set of keys that are dirty and will need to be updated in the parent `KVStore` upon a write. Keys are mapped to an empty struct to implement a set. - -### `sortedCache` - -A database that will be populated by the keys in `unsortedCache` during iteration over the cache. The keys are always held in sorted order. 
- -## CRUD Operations and Writing - -The `Set`, `Get`, and `Delete` functions all call `setCacheValue()`, which is the only entry point to mutating `cache` (besides `Write()`, which clears it). - -`setCacheValue()` inserts a key-value pair into `cache`. Two boolean parameters, `deleted` and `dirty`, are passed in to flag whether the inserted key should also be inserted into the `deleted` and `dirty` sets. Keys will be removed from the `deleted` set if they are written to after being deleted. - -### `Get` - -`Get` first attempts to return the value from `cache`. If the key does not exist in `cache`, `parent.Get()` is called instead. This value from the parent is passed into `setCacheValue()` with `deleted=false` and `dirty=false`. - -### `Has` - -`Has` returns true if `Get` returns a non-nil value. As a result of calling `Get`, it may mutate the cache by caching the read. - -### `Set` - -New values are written by setting or updating the value of a key in `cache`. `Set` does not write to `parent`. - -Calls `setCacheValue()` with `deleted=false` and `dirty=true`. - -### `Delete` - -A value being deleted from the `KVStore` is represented with a `nil` value in `cache`, and an insertion of the key into the `deleted` set. `Delete` does not write to `parent`. - -Calls `setCacheValue()` with `deleted=true` and `dirty=true`. - -### `Write` - -Key-value pairs in the cache are written to `parent` in ascending order of their keys. - -A slice of all dirty keys in `cache` is made, then sorted in increasing order. These keys are iterated over to update `parent`. - -If a key is marked for deletion (checked with `isDeleted()`), then `parent.Delete()` is called. Otherwise, `parent.Set()` is called to update the underlying `KVStore` with the value in cache. - -## Iteration - -Efficient iteration over keys in `KVStore` is important for generating Merkle range proofs. Iteration over `CacheKVStore` requires producing all key-value pairs from the underlying `KVStore` while taking into account updated values from the cache. - -In the current implementation, there is no guarantee that all values in `parent` have been cached. As a result, iteration is achieved by interleaved iteration through both `parent` and the cache (failing to actually benefit from caching). - -[cacheMergeIterator](https://github.com/cosmos/cosmos-sdk/blob/d8391cb6796d770b02448bee70b865d824e43449/store/cachekv/mergeiterator.go) implements functions to provide a single iterator with an input of iterators over `parent` and the cache. This iterator iterates over keys from both iterators in a shared lexicographic order, and overrides the value provided by the parent iterator if the same key is dirty or deleted in the cache. - -### Implementation Overview - -Iterators over `parent` and the cache are generated and passed into `cacheMergeIterator`, which returns a single, interleaved iterator. Implementation of the `parent` iterator is up to the underlying `KVStore`. The remainder of this section covers the generation of the cache iterator. - -Recall that `unsortedCache` is an unordered set of dirty cache keys. Our goal is to construct an ordered iterator over cache keys that fall within the `start` and `end` bounds requested. - -Generating the cache iterator can be decomposed into four parts: - -1. Finding all keys that exist in the range we are iterating over -2. Sorting this list of keys -3. Inserting these keys into `sortedCache` and removing them from `unsortedCache` -4. 
Returning an iterator over `sortedCache` with the desired range - -Currently, the implementation for the first two parts is split into two cases, depending on the size of the unsorted cache. The two cases are as follows. - -If the size of `unsortedCache` is less than `minSortSize` (currently 1024), a linear time approach is taken to search over keys. - -```go -n := len(store.unsortedCache) -unsorted := make([]*kv.Pair, 0) - -if n < minSortSize { - for key := range store.unsortedCache { - if dbm.IsKeyInDomain(conv.UnsafeStrToBytes(key), start, end) { - cacheValue := store.cache[key] - unsorted = append(unsorted, &kv.Pair{Key: []byte(key), Value: cacheValue.value}) - } - } - store.clearUnsortedCacheSubset(unsorted, stateUnsorted) - return -} -``` - -Here, we iterate through all the keys in `unsortedCache` (i.e., the dirty cache keys), collecting those within the requested range in an unsorted slice called `unsorted`. - -At this point, part 3. is achieved in `clearUnsortedCacheSubset()`. This function iterates through `unsorted`, removing each key from `unsortedCache`. Afterwards, `unsorted` is sorted. Lastly, it iterates through the now sorted slice, inserting key-value pairs into `sortedCache`. Any key marked for deletion is mapped to an arbitrary value (`[]byte{}`). - -In the case that the size of `unsortedCache` is larger than `minSortSize`, a linear time approach to finding keys within the desired range is too slow to use. Instead, a slice of all keys in `unsortedCache` is sorted, and binary search is used to find the beginning and ending indices of the desired range. This produces an already-sorted slice that is passed into the same `clearUnsortedCacheSubset()` function. An iota identifier (`sortedState`) is used to skip the sorting step in the function. - -Finally, part 4. is achieved with `memIterator`, which implements an iterator over the items in `sortedCache`. - -As of [PR #12885](https://github.com/cosmos/cosmos-sdk/pull/12885), an optimization to the binary search case mitigates the overhead of sorting the entirety of the key set in `unsortedCache`. To avoid wasting the compute spent sorting, we should ensure that a reasonable amount of values are removed from `unsortedCache`. If the length of the range for iteration is less than `minSortedCache`, we widen the range of values for removal from `unsortedCache` to be up to `minSortedCache` in length. This amortizes the cost of processing elements across multiple calls. 
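-
-For intuition, the following self-contained sketch (a hypothetical `mergeAscending` helper, not the actual `cacheMergeIterator`) applies the same merge rule to two pre-sorted slices: keys are consumed in shared lexicographic order, the cache shadows the parent on equal keys, and a `nil` cache value marks a deletion that suppresses the parent's entry:
-
-```go
-package main
-
-import (
-	"bytes"
-	"fmt"
-)
-
-type pair struct{ k, v []byte }
-
-// mergeAscending assumes both slices are sorted ascending by key.
-func mergeAscending(parent, cache []pair) []pair {
-	var out []pair
-	i, j := 0, 0
-	for i < len(parent) || j < len(cache) {
-		switch {
-		case j == len(cache): // cache exhausted, drain parent
-			out = append(out, parent[i])
-			i++
-		case i == len(parent) || bytes.Compare(cache[j].k, parent[i].k) < 0:
-			if cache[j].v != nil { // nil value = delete marker, emit nothing
-				out = append(out, cache[j])
-			}
-			j++
-		case bytes.Equal(parent[i].k, cache[j].k): // cache shadows parent
-			if cache[j].v != nil {
-				out = append(out, cache[j])
-			}
-			i++
-			j++
-		default: // parent key is strictly smaller
-			out = append(out, parent[i])
-			i++
-		}
-	}
-	return out
-}
-
-func main() {
-	parent := []pair{{[]byte("a"), []byte("1")}, {[]byte("b"), []byte("2")}}
-	cache := []pair{{[]byte("b"), nil}, {[]byte("c"), []byte("3")}}
-	for _, p := range mergeAscending(parent, cache) {
-		fmt.Printf("%s=%s\n", p.k, p.v) // prints: a=1, then c=3
-	}
-}
-```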
diff --git a/store/cachekv/bench_helper_test.go b/store/cachekv/bench_helper_test.go deleted file mode 100644 index fe5be27fab..0000000000 --- a/store/cachekv/bench_helper_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package cachekv_test - -import "crypto/rand" - -func randSlice(sliceSize int) []byte { - bz := make([]byte, sliceSize) - _, _ = rand.Read(bz) - return bz -} - -func incrementByteSlice(bz []byte) { - for index := len(bz) - 1; index >= 0; index-- { - if bz[index] < 255 { - bz[index]++ - break - } else { - bz[index] = 0 - } - } -} - -// Generate many keys starting at startKey, and are in sequential order -func generateSequentialKeys(startKey []byte, numKeys int) [][]byte { - toReturn := make([][]byte, 0, numKeys) - cur := make([]byte, len(startKey)) - copy(cur, startKey) - for i := 0; i < numKeys; i++ { - newKey := make([]byte, len(startKey)) - copy(newKey, cur) - toReturn = append(toReturn, newKey) - incrementByteSlice(cur) - } - return toReturn -} - -// Generate many random, unsorted keys -func generateRandomKeys(keySize int, numKeys int) [][]byte { - toReturn := make([][]byte, 0, numKeys) - for i := 0; i < numKeys; i++ { - newKey := randSlice(keySize) - toReturn = append(toReturn, newKey) - } - return toReturn -} diff --git a/store/cachekv/benchmark_test.go b/store/cachekv/benchmark_test.go deleted file mode 100644 index 7936672448..0000000000 --- a/store/cachekv/benchmark_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package cachekv_test - -import ( - fmt "fmt" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "cosmossdk.io/store/dbadapter" - "github.com/evmos/ethermint/store/cachekv" - "github.com/stretchr/testify/require" -) - -func DoBenchmarkDeepCacheStack(b *testing.B, depth int) { - db := dbm.NewMemDB() - initialStore := cachekv.NewStore(dbadapter.Store{DB: db}) - - nItems := 20 - for i := 0; i < nItems; i++ { - initialStore.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{0}) - } - - var stack CacheStack - stack.Reset(initialStore) - - for i := 0; i < depth; i++ { - stack.Snapshot() - - store := stack.CurrentStore() - store.Set([]byte(fmt.Sprintf("hello%03d", i)), []byte{byte(i)}) - } - - store := stack.CurrentStore() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - it := store.Iterator(nil, nil) - items := make([][]byte, 0, nItems) - for ; it.Valid(); it.Next() { - items = append(items, it.Key()) - it.Value() - } - it.Close() - require.Equal(b, nItems, len(items)) - } -} - -func BenchmarkDeepCacheStack1(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 1) -} - -func BenchmarkDeepCacheStack3(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 3) -} - -func BenchmarkDeepCacheStack10(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 10) -} - -func BenchmarkDeepCacheStack13(b *testing.B) { - DoBenchmarkDeepCacheStack(b, 13) -} - -// CacheStack manages a stack of nested cache store to -// support the evm `StateDB`'s `Snapshot` and `RevertToSnapshot` methods. -type CacheStack struct { - initialStore *cachekv.Store - // Context of the initial state before transaction execution. - // It's the context used by `StateDB.CommitedState`. - cacheStores []*cachekv.Store -} - -// CurrentContext returns the top context of cached stack, -// if the stack is empty, returns the initial context. -func (cs *CacheStack) CurrentStore() *cachekv.Store { - l := len(cs.cacheStores) - if l == 0 { - return cs.initialStore - } - return cs.cacheStores[l-1] -} - -// Reset sets the initial context and clear the cache context stack. 
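-// After Reset the stack is empty, so CurrentStore falls back to initialStore
-// until the next Snapshot pushes a new cached layer.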
-func (cs *CacheStack) Reset(initialStore *cachekv.Store) { - cs.initialStore = initialStore - cs.cacheStores = nil -} - -// IsEmpty returns true if the cache context stack is empty. -func (cs *CacheStack) IsEmpty() bool { - return len(cs.cacheStores) == 0 -} - -// Commit commits all the cached contexts from top to bottom in order and clears the stack by setting an empty slice of cache contexts. -func (cs *CacheStack) Commit() { - // commit in order from top to bottom - for i := len(cs.cacheStores) - 1; i >= 0; i-- { - cs.cacheStores[i].Write() - } - cs.cacheStores = nil -} - -// CommitToRevision commit the cache after the target revision, -// to improve efficiency of db operations. -func (cs *CacheStack) CommitToRevision(target int) error { - if target < 0 || target >= len(cs.cacheStores) { - return fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores)) - } - - // commit in order from top to bottom - for i := len(cs.cacheStores) - 1; i > target; i-- { - cs.cacheStores[i].Write() - } - cs.cacheStores = cs.cacheStores[0 : target+1] - - return nil -} - -// Snapshot pushes a new cached context to the stack, -// and returns the index of it. -func (cs *CacheStack) Snapshot() int { - cs.cacheStores = append(cs.cacheStores, cs.CurrentStore().Clone()) - return len(cs.cacheStores) - 1 -} - -// RevertToSnapshot pops all the cached contexts after the target index (inclusive). -// the target should be snapshot index returned by `Snapshot`. -// This function panics if the index is out of bounds. -func (cs *CacheStack) RevertToSnapshot(target int) { - if target < 0 || target >= len(cs.cacheStores) { - panic(fmt.Errorf("snapshot index %d out of bound [%d..%d)", target, 0, len(cs.cacheStores))) - } - cs.cacheStores = cs.cacheStores[:target] -} diff --git a/store/cachekv/internal/btree.go b/store/cachekv/internal/btree.go deleted file mode 100644 index 76e6888b1f..0000000000 --- a/store/cachekv/internal/btree.go +++ /dev/null @@ -1,121 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - - "cosmossdk.io/store/types" - "github.com/tidwall/btree" -) - -const ( - // The approximate number of items and children per B-tree node. Tuned with benchmarks. - // copied from memdb. - bTreeDegree = 32 -) - -var errKeyEmpty = errors.New("key cannot be empty") - -// BTree implements the sorted cache for cachekv store, -// we don't use MemDB here because cachekv is used extensively in sdk core path, -// we need it to be as fast as possible, while `MemDB` is mainly used as a mocking db in unit tests. -// -// We choose tidwall/btree over google/btree here because it provides API to implement step iterator directly. -type BTree struct { - tree *btree.BTreeG[item] -} - -// NewBTree creates a wrapper around `btree.BTreeG`. 
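-// The zero value is ready to use: the underlying tree is allocated lazily by
-// init() on the first Set, so empty BTrees are free to create and copy.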
-func NewBTree() BTree { - return BTree{} -} - -func (bt *BTree) init() { - bt.tree = btree.NewBTreeGOptions(byKeys, btree.Options{ - Degree: bTreeDegree, - NoLocks: false, - }) -} - -func (bt *BTree) Set(key, value []byte, dirty bool) { - if bt.tree == nil { - bt.init() - } - bt.tree.Set(item{key: key, value: value, dirty: dirty}) -} - -func (bt BTree) Get(key []byte) ([]byte, bool) { - if bt.tree == nil { - return nil, false - } - - i, found := bt.tree.Get(newItem(key)) - if !found { - return nil, false - } - return i.value, true -} - -func (bt BTree) Delete(key []byte) { - if bt.tree == nil { - return - } - bt.tree.Delete(newItem(key)) -} - -func (bt BTree) Iterator(start, end []byte) (types.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt, true), nil -} - -func (bt BTree) ReverseIterator(start, end []byte) (types.Iterator, error) { - if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, errKeyEmpty - } - return newMemIterator(start, end, bt, false), nil -} - -// ScanDirtyItems iterate over the dirty entries. -func (bt BTree) ScanDirtyItems(fn func(key, value []byte)) { - if bt.tree == nil { - return - } - - bt.tree.Scan(func(item item) bool { - if item.dirty { - fn(item.key, item.value) - } - return true - }) -} - -// Copy the tree. This is a copy-on-write operation and is very fast because -// it only performs a shadowed copy. -func (bt BTree) Copy() BTree { - if bt.tree == nil { - return BTree{} - } - - return BTree{ - tree: bt.tree.Copy(), - } -} - -// item is a btree item with byte slices as keys and values -type item struct { - key []byte - value []byte - dirty bool -} - -// byKeys compares the items by key -func byKeys(a, b item) bool { - return bytes.Compare(a.key, b.key) == -1 -} - -// newItem creates a new pair item. -func newItem(key []byte) item { - return item{key: key} -} diff --git a/store/cachekv/internal/btree_test.go b/store/cachekv/internal/btree_test.go deleted file mode 100644 index d43b4fa4fc..0000000000 --- a/store/cachekv/internal/btree_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package internal - -import ( - "testing" - - "cosmossdk.io/store/types" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/stretchr/testify/require" -) - -func TestGetSetDelete(t *testing.T) { - db := NewBTree() - - // A nonexistent key should return nil. - value, found := db.Get([]byte("a")) - require.Nil(t, value) - require.False(t, found) - - // Set and get a value. - db.Set([]byte("a"), []byte{0x01}, true) - db.Set([]byte("b"), []byte{0x02}, true) - value, found = db.Get([]byte("a")) - require.Equal(t, []byte{0x01}, value) - require.True(t, found) - - value, found = db.Get([]byte("b")) - require.Equal(t, []byte{0x02}, value) - require.True(t, found) - - // Deleting a non-existent value is fine. - db.Delete([]byte("x")) - - // Delete a value. - db.Delete([]byte("a")) - - value, found = db.Get([]byte("a")) - require.Nil(t, value) - require.False(t, found) - - db.Delete([]byte("b")) - - value, found = db.Get([]byte("b")) - require.Nil(t, value) - require.False(t, found) -} - -func TestDBIterator(t *testing.T) { - db := NewBTree() - - for i := 0; i < 10; i++ { - if i != 6 { // but skip 6. 
- db.Set(int642Bytes(int64(i)), []byte{}, true) - } - } - - // Blank iterator keys should error - _, err := db.ReverseIterator([]byte{}, nil) - require.Equal(t, errKeyEmpty, err) - _, err = db.ReverseIterator(nil, []byte{}) - require.Equal(t, errKeyEmpty, err) - - itr, err := db.Iterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator") - - ritr, err := db.ReverseIterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator") - - itr, err = db.Iterator(nil, int642Bytes(0)) - require.NoError(t, err) - verifyIterator(t, itr, []int64(nil), "forward iterator to 0") - - ritr, err = db.ReverseIterator(int642Bytes(10), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64(nil), "reverse iterator from 10 (ex)") - - itr, err = db.Iterator(int642Bytes(0), nil) - require.NoError(t, err) - verifyIterator(t, itr, []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0") - - itr, err = db.Iterator(int642Bytes(1), nil) - require.NoError(t, err) - verifyIterator(t, itr, []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1") - - ritr, err = db.ReverseIterator(nil, int642Bytes(10)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)") - - ritr, err = db.ReverseIterator(nil, int642Bytes(9)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)") - - ritr, err = db.ReverseIterator(nil, int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)") - - itr, err = db.Iterator(int642Bytes(5), int642Bytes(6)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 6") - - itr, err = db.Iterator(int642Bytes(5), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{5}, "forward iterator from 5 to 7") - - itr, err = db.Iterator(int642Bytes(5), int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{5, 7}, "forward iterator from 5 to 8") - - itr, err = db.Iterator(int642Bytes(6), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, itr, []int64(nil), "forward iterator from 6 to 7") - - itr, err = db.Iterator(int642Bytes(6), int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{7}, "forward iterator from 6 to 8") - - itr, err = db.Iterator(int642Bytes(7), int642Bytes(8)) - require.NoError(t, err) - verifyIterator(t, itr, []int64{7}, "forward iterator from 7 to 8") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(5)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{4}, "reverse iterator from 5 (ex) to 4") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(6)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{5, 4}, "reverse iterator from 6 (ex) to 4") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{5, 4}, "reverse iterator from 7 (ex) to 4") - - ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(6)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{5}, "reverse iterator from 6 (ex) to 5") - - ritr, err = db.ReverseIterator(int642Bytes(5), int642Bytes(7)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{5}, "reverse iterator from 7 (ex) to 5") - - ritr, err = db.ReverseIterator(int642Bytes(6), int642Bytes(7)) - 
require.NoError(t, err) - verifyIterator(t, ritr, - []int64(nil), "reverse iterator from 7 (ex) to 6") - - ritr, err = db.ReverseIterator(int642Bytes(10), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64(nil), "reverse iterator to 10") - - ritr, err = db.ReverseIterator(int642Bytes(6), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{9, 8, 7}, "reverse iterator to 6") - - ritr, err = db.ReverseIterator(int642Bytes(5), nil) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{9, 8, 7, 5}, "reverse iterator to 5") - - ritr, err = db.ReverseIterator(int642Bytes(8), int642Bytes(9)) - require.NoError(t, err) - verifyIterator(t, ritr, []int64{8}, "reverse iterator from 9 (ex) to 8") - - ritr, err = db.ReverseIterator(int642Bytes(2), int642Bytes(4)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64{3, 2}, "reverse iterator from 4 (ex) to 2") - - ritr, err = db.ReverseIterator(int642Bytes(4), int642Bytes(2)) - require.NoError(t, err) - verifyIterator(t, ritr, - []int64(nil), "reverse iterator from 2 (ex) to 4") - - // Ensure that the iterators don't panic with an empty database. - db2 := NewBTree() - - itr, err = db2.Iterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, itr, nil, "forward iterator with empty db") - - ritr, err = db2.ReverseIterator(nil, nil) - require.NoError(t, err) - verifyIterator(t, ritr, nil, "reverse iterator with empty db") -} - -func verifyIterator(t *testing.T, itr types.Iterator, expected []int64, msg string) { - i := 0 - for itr.Valid() { - key := itr.Key() - require.Equal(t, expected[i], bytes2Int64(key), "iterator: %d mismatches", i) - itr.Next() - i++ - } - require.Equal(t, i, len(expected), "expected to have fully iterated over all the elements in iter") - require.NoError(t, itr.Close()) -} - -func int642Bytes(i int64) []byte { - return sdk.Uint64ToBigEndian(uint64(i)) -} - -func bytes2Int64(buf []byte) int64 { - return int64(sdk.BigEndianToUint64(buf)) -} diff --git a/store/cachekv/internal/memiterator.go b/store/cachekv/internal/memiterator.go deleted file mode 100644 index de8db07339..0000000000 --- a/store/cachekv/internal/memiterator.go +++ /dev/null @@ -1,123 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - - "cosmossdk.io/store/types" - "github.com/tidwall/btree" -) - -var _ types.Iterator = (*memIterator)(nil) - -// memIterator iterates over iterKVCache items. -// if value is nil, means it was deleted. -// Implements Iterator. 
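-// Iterators are constructed over an isolated copy of the btree (see
-// Store.iterator in store.go), so later writes to the store cannot
-// invalidate an open iterator.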
-type memIterator struct { - iter btree.IterG[item] - - start []byte - end []byte - ascending bool - valid bool -} - -func newMemIterator(start, end []byte, items BTree, ascending bool) *memIterator { - if items.tree == nil { - return &memIterator{start: start, end: end, ascending: ascending, valid: false} - } - - iter := items.tree.Iter() - var valid bool - if ascending { - if start != nil { - valid = iter.Seek(newItem(start)) - } else { - valid = iter.First() - } - } else { - if end != nil { - valid = iter.Seek(newItem(end)) - if !valid { - valid = iter.Last() - } else { - // end is exclusive - valid = iter.Prev() - } - } else { - valid = iter.Last() - } - } - - mi := &memIterator{ - iter: iter, - start: start, - end: end, - ascending: ascending, - valid: valid, - } - - if mi.valid { - mi.valid = mi.keyInRange(mi.Key()) - } - - return mi -} - -func (mi *memIterator) Domain() (start []byte, end []byte) { - return mi.start, mi.end -} - -func (mi *memIterator) Close() error { - mi.iter.Release() - return nil -} - -func (mi *memIterator) Error() error { - if !mi.Valid() { - return errors.New("invalid memIterator") - } - return nil -} - -func (mi *memIterator) Valid() bool { - return mi.valid -} - -func (mi *memIterator) Next() { - mi.assertValid() - - if mi.ascending { - mi.valid = mi.iter.Next() - } else { - mi.valid = mi.iter.Prev() - } - - if mi.valid { - mi.valid = mi.keyInRange(mi.Key()) - } -} - -func (mi *memIterator) keyInRange(key []byte) bool { - if mi.ascending && mi.end != nil && bytes.Compare(key, mi.end) >= 0 { - return false - } - if !mi.ascending && mi.start != nil && bytes.Compare(key, mi.start) < 0 { - return false - } - return true -} - -func (mi *memIterator) Key() []byte { - return mi.iter.Item().key -} - -func (mi *memIterator) Value() []byte { - return mi.iter.Item().value -} - -func (mi *memIterator) assertValid() { - if err := mi.Error(); err != nil { - panic(err) - } -} diff --git a/store/cachekv/internal/mergeiterator.go b/store/cachekv/internal/mergeiterator.go deleted file mode 100644 index e4d80ab491..0000000000 --- a/store/cachekv/internal/mergeiterator.go +++ /dev/null @@ -1,235 +0,0 @@ -package internal - -import ( - "bytes" - "errors" - - "cosmossdk.io/store/types" -) - -// cacheMergeIterator merges a parent Iterator and a cache Iterator. -// The cache iterator may return nil keys to signal that an item -// had been deleted (but not deleted in the parent). -// If the cache iterator has the same key as the parent, the -// cache shadows (overrides) the parent. -// -// TODO: Optimize by memoizing. -type cacheMergeIterator struct { - parent types.Iterator - cache types.Iterator - ascending bool - - valid bool -} - -var _ types.Iterator = (*cacheMergeIterator)(nil) - -func NewCacheMergeIterator(parent, cache types.Iterator, ascending bool) *cacheMergeIterator { //nolint:revive - iter := &cacheMergeIterator{ - parent: parent, - cache: cache, - ascending: ascending, - } - - iter.valid = iter.skipUntilExistsOrInvalid() - return iter -} - -// Domain implements Iterator. -// Returns parent domain because cache and parent domains are the same. -func (iter *cacheMergeIterator) Domain() (start, end []byte) { - return iter.parent.Domain() -} - -// Valid implements Iterator. -func (iter *cacheMergeIterator) Valid() bool { - return iter.valid -} - -// Next implements Iterator -func (iter *cacheMergeIterator) Next() { - iter.assertValid() - - switch { - case !iter.parent.Valid(): - // If parent is invalid, get the next cache item. 
- iter.cache.Next() - case !iter.cache.Valid(): - // If cache is invalid, get the next parent item. - iter.parent.Next() - default: - // Both are valid. Compare keys. - keyP, keyC := iter.parent.Key(), iter.cache.Key() - switch iter.compare(keyP, keyC) { - case -1: // parent < cache - iter.parent.Next() - case 0: // parent == cache - iter.parent.Next() - iter.cache.Next() - case 1: // parent > cache - iter.cache.Next() - } - } - iter.valid = iter.skipUntilExistsOrInvalid() -} - -// Key implements Iterator -func (iter *cacheMergeIterator) Key() []byte { - iter.assertValid() - - // If parent is invalid, get the cache key. - if !iter.parent.Valid() { - return iter.cache.Key() - } - - // If cache is invalid, get the parent key. - if !iter.cache.Valid() { - return iter.parent.Key() - } - - // Both are valid. Compare keys. - keyP, keyC := iter.parent.Key(), iter.cache.Key() - - cmp := iter.compare(keyP, keyC) - switch cmp { - case -1: // parent < cache - return keyP - case 0: // parent == cache - return keyP - case 1: // parent > cache - return keyC - default: - panic("invalid compare result") - } -} - -// Value implements Iterator -func (iter *cacheMergeIterator) Value() []byte { - iter.assertValid() - - // If parent is invalid, get the cache value. - if !iter.parent.Valid() { - return iter.cache.Value() - } - - // If cache is invalid, get the parent value. - if !iter.cache.Valid() { - return iter.parent.Value() - } - - // Both are valid. Compare keys. - keyP, keyC := iter.parent.Key(), iter.cache.Key() - - cmp := iter.compare(keyP, keyC) - switch cmp { - case -1: // parent < cache - return iter.parent.Value() - case 0: // parent == cache - return iter.cache.Value() - case 1: // parent > cache - return iter.cache.Value() - default: - panic("invalid comparison result") - } -} - -// Close implements Iterator -func (iter *cacheMergeIterator) Close() error { - err1 := iter.cache.Close() - if err := iter.parent.Close(); err != nil { - return err - } - - return err1 -} - -// Error returns an error if the cacheMergeIterator is invalid defined by the -// Valid method. -func (iter *cacheMergeIterator) Error() error { - if !iter.Valid() { - return errors.New("invalid cacheMergeIterator") - } - - return nil -} - -// If not valid, panics. -// NOTE: May have side-effect of iterating over cache. -func (iter *cacheMergeIterator) assertValid() { - if err := iter.Error(); err != nil { - panic(err) - } -} - -// Like bytes.Compare but opposite if not ascending. -func (iter *cacheMergeIterator) compare(a, b []byte) int { - if iter.ascending { - return bytes.Compare(a, b) - } - - return bytes.Compare(a, b) * -1 -} - -// Skip all delete-items from the cache w/ `key < until`. After this function, -// current cache item is a non-delete-item, or `until <= key`. -// If the current cache item is not a delete item, does nothing. -// If `until` is nil, there is no limit, and cache may end up invalid. -// CONTRACT: cache is valid. -func (iter *cacheMergeIterator) skipCacheDeletes(until []byte) { - for iter.cache.Valid() && - iter.cache.Value() == nil && - (until == nil || iter.compare(iter.cache.Key(), until) < 0) { - iter.cache.Next() - } -} - -// Fast forwards cache (or parent+cache in case of deleted items) until current -// item exists, or until iterator becomes invalid. -// Returns whether the iterator is valid. -func (iter *cacheMergeIterator) skipUntilExistsOrInvalid() bool { - for { - // If parent is invalid, fast-forward cache. 
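-		// Passing a nil `until` removes the bound, so all remaining delete
-		// markers are consumed and the cache iterator itself may become invalid.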
- if !iter.parent.Valid() { - iter.skipCacheDeletes(nil) - return iter.cache.Valid() - } - // Parent is valid. - - if !iter.cache.Valid() { - return true - } - // Parent is valid, cache is valid. - - // Compare parent and cache. - keyP := iter.parent.Key() - keyC := iter.cache.Key() - - switch iter.compare(keyP, keyC) { - case -1: // parent < cache. - return true - - case 0: // parent == cache. - // Skip over if cache item is a delete. - valueC := iter.cache.Value() - if valueC == nil { - iter.parent.Next() - iter.cache.Next() - - continue - } - // Cache is not a delete. - - return true // cache exists. - case 1: // cache < parent - // Skip over if cache item is a delete. - valueC := iter.cache.Value() - if valueC == nil { - iter.skipCacheDeletes(keyP) - continue - } - // Cache is not a delete. - - return true // cache exists. - } - } -} diff --git a/store/cachekv/store.go b/store/cachekv/store.go deleted file mode 100644 index 9e809d4993..0000000000 --- a/store/cachekv/store.go +++ /dev/null @@ -1,179 +0,0 @@ -package cachekv - -import ( - "io" - "sync" - - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" - "github.com/evmos/ethermint/store/cachekv/internal" -) - -// Store wraps an in-memory cache around an underlying types.KVStore. -type Store struct { - mtx sync.Mutex - cache internal.BTree // always ascending sorted - parent types.KVStore -} - -var _ types.CacheKVStore = (*Store)(nil) - -// NewStore creates a new Store object -func NewStore(parent types.KVStore) *Store { - return &Store{ - cache: internal.NewBTree(), - parent: parent, - } -} - -// GetStoreType implements Store. -func (store *Store) GetStoreType() types.StoreType { - return store.parent.GetStoreType() -} - -// Clone creates a snapshot of the cache store. -// This is a copy-on-write operation and is very fast because -// it only performs a shadowed copy. -func (store *Store) Clone() *Store { - store.mtx.Lock() - defer store.mtx.Unlock() - - return &Store{ - cache: store.cache.Copy(), - parent: store.parent, - } -} - -// swapCache swap out the internal cache store and leave the current store in a unusable state. -func (store *Store) swapCache() internal.BTree { - store.mtx.Lock() - defer store.mtx.Unlock() - - cache := store.cache - store.cache = internal.BTree{} - return cache -} - -// Restore restores the store cache to a given snapshot. -func (store *Store) Restore(s types.CacheKVStore) { - cache := s.(*Store).swapCache() - - store.mtx.Lock() - defer store.mtx.Unlock() - - store.cache = cache -} - -// Get implements types.KVStore. -func (store *Store) Get(key []byte) (value []byte) { - store.mtx.Lock() - defer store.mtx.Unlock() - - types.AssertValidKey(key) - - if value, found := store.cache.Get(key); found { - return value - } - value = store.parent.Get(key) - store.setCacheValue(key, value, false) - return value -} - -// Set implements types.KVStore. -func (store *Store) Set(key []byte, value []byte) { - store.mtx.Lock() - defer store.mtx.Unlock() - - types.AssertValidKey(key) - types.AssertValidValue(value) - - store.setCacheValue(key, value, true) -} - -// Has implements types.KVStore. -func (store *Store) Has(key []byte) bool { - value := store.Get(key) - return value != nil -} - -// Delete implements types.KVStore. -func (store *Store) Delete(key []byte) { - store.mtx.Lock() - defer store.mtx.Unlock() - - types.AssertValidKey(key) - store.setCacheValue(key, nil, true) -} - -// Implements Cachetypes.KVStore. 
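-// Write flushes only the dirty entries to the parent, in ascending key order
-// (ScanDirtyItems walks the btree in sorted order), then resets the cache to
-// an empty BTree.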
-func (store *Store) Write() { - store.mtx.Lock() - defer store.mtx.Unlock() - - store.cache.ScanDirtyItems(func(key, value []byte) { - if value == nil { - store.parent.Delete(key) - } else { - store.parent.Set(key, value) - } - }) - - store.cache = internal.NewBTree() -} - -// CacheWrap implements CacheWrapper. -func (store *Store) CacheWrap() types.CacheWrap { - return NewStore(store) -} - -// CacheWrapWithTrace implements the CacheWrapper interface. -func (store *Store) CacheWrapWithTrace(w io.Writer, tc types.TraceContext) types.CacheWrap { - return NewStore(tracekv.NewStore(store, w, tc)) -} - -//---------------------------------------- -// Iteration - -// Iterator implements types.KVStore. -func (store *Store) Iterator(start, end []byte) types.Iterator { - return store.iterator(start, end, true) -} - -// ReverseIterator implements types.KVStore. -func (store *Store) ReverseIterator(start, end []byte) types.Iterator { - return store.iterator(start, end, false) -} - -func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { - store.mtx.Lock() - defer store.mtx.Unlock() - - isoSortedCache := store.cache.Copy() - - var ( - err error - parent, cache types.Iterator - ) - - if ascending { - parent = store.parent.Iterator(start, end) - cache, err = isoSortedCache.Iterator(start, end) - } else { - parent = store.parent.ReverseIterator(start, end) - cache, err = isoSortedCache.ReverseIterator(start, end) - } - if err != nil { - panic(err) - } - - return internal.NewCacheMergeIterator(parent, cache, ascending) -} - -//---------------------------------------- -// etc - -// Only entrypoint to mutate store.cache. -// A `nil` value means a deletion. -func (store *Store) setCacheValue(key, value []byte, dirty bool) { - store.cache.Set(key, value, dirty) -} diff --git a/store/cachekv/store_bench_test.go b/store/cachekv/store_bench_test.go deleted file mode 100644 index 9e7ece49e5..0000000000 --- a/store/cachekv/store_bench_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package cachekv_test - -import ( - "testing" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/dbadapter" - "github.com/evmos/ethermint/store/cachekv" -) - -var sink interface{} - -const defaultValueSizeBz = 1 << 12 - -// This benchmark measures the time of iterator.Next() when the parent store is blank -func benchmarkBlankParentIteratorNext(b *testing.B, keysize int) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - kvstore := cachekv.NewStore(mem) - // Use a singleton for value, to not waste time computing it - value := randSlice(defaultValueSizeBz) - // Use simple values for keys, pick a random start, - // and take next b.N keys sequentially after.] - startKey := randSlice(32) - - // Add 1 to avoid issues when b.N = 1 - keys := generateSequentialKeys(startKey, b.N+1) - for _, k := range keys { - kvstore.Set(k, value) - } - - b.ReportAllocs() - b.ResetTimer() - - iter := kvstore.Iterator(keys[0], keys[b.N]) - defer iter.Close() - - for ; iter.Valid(); iter.Next() { - _ = iter.Key() - // deadcode elimination stub - sink = iter - } -} - -// Benchmark setting New keys to a store, where the new keys are in sequence. -func benchmarkBlankParentAppend(b *testing.B, keysize int) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - kvstore := cachekv.NewStore(mem) - - // Use a singleton for value, to not waste time computing it - value := randSlice(32) - // Use simple values for keys, pick a random start, - // and take next b.N keys sequentially after. 
- startKey := randSlice(32) - - keys := generateSequentialKeys(startKey, b.N) - - b.ReportAllocs() - b.ResetTimer() - - for _, k := range keys { - kvstore.Set(k, value) - } -} - -// Benchmark setting New keys to a store, where the new keys are random. -// the speed of this function does not depend on the values in the parent store -func benchmarkRandomSet(b *testing.B, keysize int) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - kvstore := cachekv.NewStore(mem) - - // Use a singleton for value, to not waste time computing it - value := randSlice(defaultValueSizeBz) - // Add 1 to avoid issues when b.N = 1 - keys := generateRandomKeys(keysize, b.N+1) - - b.ReportAllocs() - b.ResetTimer() - - for _, k := range keys { - kvstore.Set(k, value) - } - - iter := kvstore.Iterator(keys[0], keys[b.N]) - defer iter.Close() - - for ; iter.Valid(); iter.Next() { - _ = iter.Key() - // deadcode elimination stub - sink = iter - } -} - -// Benchmark creating an iterator on a parent with D entries, -// that are all deleted in the cacheKV store. -// We essentially are benchmarking the cacheKV iterator creation & iteration times -// with the number of entries deleted in the parent. -func benchmarkIteratorOnParentWithManyDeletes(b *testing.B, numDeletes int) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - - // Use a singleton for value, to not waste time computing it - value := randSlice(32) - // Use simple values for keys, pick a random start, - // and take next D keys sequentially after. - startKey := randSlice(32) - // Add 1 to avoid issues when numDeletes = 1 - keys := generateSequentialKeys(startKey, numDeletes+1) - // setup parent db with D keys. - for _, k := range keys { - mem.Set(k, value) - } - kvstore := cachekv.NewStore(mem) - // Delete all keys from the cache KV store. - // The keys[1:] is to keep at least one entry in parent, due to a bug in the SDK iterator design. - // Essentially the iterator will never be valid, in that it should never run. - // However, this is incompatible with the for loop structure the SDK uses, hence - // causes a panic. Thus we do keys[1:]. - for _, k := range keys[1:] { - kvstore.Delete(k) - } - - b.ReportAllocs() - b.ResetTimer() - - iter := kvstore.Iterator(keys[0], keys[numDeletes]) - defer iter.Close() - - for ; iter.Valid(); iter.Next() { - _ = iter.Key() - // deadcode elimination stub - sink = iter - } -} - -func BenchmarkBlankParentIteratorNextKeySize32(b *testing.B) { - benchmarkBlankParentIteratorNext(b, 32) -} - -func BenchmarkBlankParentAppendKeySize32(b *testing.B) { - benchmarkBlankParentAppend(b, 32) -} - -func BenchmarkSetKeySize32(b *testing.B) { - benchmarkRandomSet(b, 32) -} - -func BenchmarkIteratorOnParentWith1MDeletes(b *testing.B) { - benchmarkIteratorOnParentWithManyDeletes(b, 1_000_000) -} diff --git a/store/cachekv/store_test.go b/store/cachekv/store_test.go deleted file mode 100644 index bb53bd660d..0000000000 --- a/store/cachekv/store_test.go +++ /dev/null @@ -1,707 +0,0 @@ -package cachekv_test - -import ( - "fmt" - "testing" - - dbm "github.com/cosmos/cosmos-db" - tmrand "github.com/cometbft/cometbft/libs/rand" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/dbadapter" - "cosmossdk.io/store/types" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/evmos/ethermint/store/cachekv" -) - -func newCacheKVStore() types.CacheKVStore { - // create two layer of cache store to better emulate the real world. 
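-	// Clone() takes a copy-on-write snapshot, so the store under test is a
-	// second cache layer stacked on top of the memdb-backed deliverState.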
- mem := dbadapter.Store{DB: dbm.NewMemDB()} - deliverState := cachekv.NewStore(mem) - return deliverState.Clone() -} - -func keyFmt(i int) []byte { return bz(fmt.Sprintf("key%0.8d", i)) } -func valFmt(i int) []byte { return bz(fmt.Sprintf("value%0.8d", i)) } - -func TestCacheKVStore(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - st := cachekv.NewStore(mem) - - require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") - - // put something in mem and in cache - mem.Set(keyFmt(1), valFmt(1)) - st.Set(keyFmt(1), valFmt(1)) - require.Equal(t, valFmt(1), st.Get(keyFmt(1))) - - // update it in cache, shoudn't change mem - st.Set(keyFmt(1), valFmt(2)) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - require.Equal(t, valFmt(1), mem.Get(keyFmt(1))) - - // write it. should change mem - st.Write() - require.Equal(t, valFmt(2), mem.Get(keyFmt(1))) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - - // more writes and checks - st.Write() - st.Write() - require.Equal(t, valFmt(2), mem.Get(keyFmt(1))) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - - // make a new one, check it - st = cachekv.NewStore(mem) - require.Equal(t, valFmt(2), st.Get(keyFmt(1))) - - // make a new one and delete - should not be removed from mem - st = cachekv.NewStore(mem) - st.Delete(keyFmt(1)) - require.Empty(t, st.Get(keyFmt(1))) - require.Equal(t, mem.Get(keyFmt(1)), valFmt(2)) - - // Write. should now be removed from both - st.Write() - require.Empty(t, st.Get(keyFmt(1)), "Expected `key1` to be empty") - require.Empty(t, mem.Get(keyFmt(1)), "Expected `key1` to be empty") -} - -func TestCacheKVStoreNoNilSet(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - st := cachekv.NewStore(mem) - require.Panics(t, func() { st.Set([]byte("key"), nil) }, "setting a nil value should panic") - require.Panics(t, func() { st.Set(nil, []byte("value")) }, "setting a nil key should panic") - require.Panics(t, func() { st.Set([]byte(""), []byte("value")) }, "setting an empty key should panic") -} - -func TestCacheKVStoreNested(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - st := cachekv.NewStore(mem) - - // set. check its there on st and not on mem. - st.Set(keyFmt(1), valFmt(1)) - require.Empty(t, mem.Get(keyFmt(1))) - require.Equal(t, valFmt(1), st.Get(keyFmt(1))) - - // make a new from st and check - st2 := cachekv.NewStore(st) - require.Equal(t, valFmt(1), st2.Get(keyFmt(1))) - - // update the value on st2, check it only effects st2 - st2.Set(keyFmt(1), valFmt(3)) - require.Equal(t, []byte(nil), mem.Get(keyFmt(1))) - require.Equal(t, valFmt(1), st.Get(keyFmt(1))) - require.Equal(t, valFmt(3), st2.Get(keyFmt(1))) - - // st2 writes to its parent, st. 
doesnt effect mem - st2.Write() - require.Equal(t, []byte(nil), mem.Get(keyFmt(1))) - require.Equal(t, valFmt(3), st.Get(keyFmt(1))) - - // updates mem - st.Write() - require.Equal(t, valFmt(3), mem.Get(keyFmt(1))) -} - -func TestCacheKVIteratorBounds(t *testing.T) { - st := newCacheKVStore() - - // set some items - nItems := 5 - for i := 0; i < nItems; i++ { - st.Set(keyFmt(i), valFmt(i)) - } - - // iterate over all of them - itr := st.Iterator(nil, nil) - i := 0 - for ; itr.Valid(); itr.Next() { - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(i), k) - require.Equal(t, valFmt(i), v) - i++ - } - require.Equal(t, nItems, i) - require.NoError(t, itr.Close()) - - // iterate over none - itr = st.Iterator(bz("money"), nil) - i = 0 - for ; itr.Valid(); itr.Next() { - i++ - } - require.Equal(t, 0, i) - require.NoError(t, itr.Close()) - - // iterate over lower - itr = st.Iterator(keyFmt(0), keyFmt(3)) - i = 0 - for ; itr.Valid(); itr.Next() { - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(i), k) - require.Equal(t, valFmt(i), v) - i++ - } - require.Equal(t, 3, i) - require.NoError(t, itr.Close()) - - // iterate over upper - itr = st.Iterator(keyFmt(2), keyFmt(4)) - i = 2 - for ; itr.Valid(); itr.Next() { - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(i), k) - require.Equal(t, valFmt(i), v) - i++ - } - require.Equal(t, 4, i) - require.NoError(t, itr.Close()) -} - -func TestCacheKVReverseIteratorBounds(t *testing.T) { - st := newCacheKVStore() - - // set some items - nItems := 5 - for i := 0; i < nItems; i++ { - st.Set(keyFmt(i), valFmt(i)) - } - - // iterate over all of them - itr := st.ReverseIterator(nil, nil) - i := 0 - for ; itr.Valid(); itr.Next() { - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(nItems-1-i), k) - require.Equal(t, valFmt(nItems-1-i), v) - i++ - } - require.Equal(t, nItems, i) - require.NoError(t, itr.Close()) - - // iterate over none - itr = st.ReverseIterator(bz("money"), nil) - i = 0 - for ; itr.Valid(); itr.Next() { - i++ - } - require.Equal(t, 0, i) - require.NoError(t, itr.Close()) - - // iterate over lower - end := 3 - itr = st.ReverseIterator(keyFmt(0), keyFmt(end)) - i = 0 - for ; itr.Valid(); itr.Next() { - i++ - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(end-i), k) - require.Equal(t, valFmt(end-i), v) - } - require.Equal(t, 3, i) - require.NoError(t, itr.Close()) - - // iterate over upper - end = 4 - itr = st.ReverseIterator(keyFmt(2), keyFmt(end)) - i = 0 - for ; itr.Valid(); itr.Next() { - i++ - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(end-i), k) - require.Equal(t, valFmt(end-i), v) - } - require.Equal(t, 2, i) - require.NoError(t, itr.Close()) -} - -func TestCacheKVMergeIteratorBasics(t *testing.T) { - st := newCacheKVStore() - - // set and delete an item in the cache, iterator should be empty - k, v := keyFmt(0), valFmt(0) - st.Set(k, v) - st.Delete(k) - assertIterateDomain(t, st, 0) - - // now set it and assert its there - st.Set(k, v) - assertIterateDomain(t, st, 1) - - // write it and assert its there - st.Write() - assertIterateDomain(t, st, 1) - - // remove it in cache and assert its not - st.Delete(k) - assertIterateDomain(t, st, 0) - - // write the delete and assert its not there - st.Write() - assertIterateDomain(t, st, 0) - - // add two keys and assert theyre there - k1, v1 := keyFmt(1), valFmt(1) - st.Set(k, v) - st.Set(k1, v1) - assertIterateDomain(t, st, 2) - - // write it and assert theyre there - st.Write() - assertIterateDomain(t, st, 2) - - // remove one in cache and assert 
its not - st.Delete(k1) - assertIterateDomain(t, st, 1) - - // write the delete and assert its not there - st.Write() - assertIterateDomain(t, st, 1) - - // delete the other key in cache and asserts its empty - st.Delete(k) - assertIterateDomain(t, st, 0) -} - -func TestCacheKVMergeIteratorDeleteLast(t *testing.T) { - st := newCacheKVStore() - - // set some items and write them - nItems := 5 - for i := 0; i < nItems; i++ { - st.Set(keyFmt(i), valFmt(i)) - } - st.Write() - - // set some more items and leave dirty - for i := nItems; i < nItems*2; i++ { - st.Set(keyFmt(i), valFmt(i)) - } - - // iterate over all of them - assertIterateDomain(t, st, nItems*2) - - // delete them all - for i := 0; i < nItems*2; i++ { - last := nItems*2 - 1 - i - st.Delete(keyFmt(last)) - assertIterateDomain(t, st, last) - } -} - -func TestCacheKVMergeIteratorDeletes(t *testing.T) { - st := newCacheKVStore() - truth := dbm.NewMemDB() - - // set some items and write them - nItems := 10 - for i := 0; i < nItems; i++ { - doOp(t, st, truth, opSet, i) - } - st.Write() - - // delete every other item, starting from 0 - for i := 0; i < nItems; i += 2 { - doOp(t, st, truth, opDel, i) - assertIterateDomainCompare(t, st, truth) - } - - // reset - st = newCacheKVStore() - truth = dbm.NewMemDB() - - // set some items and write them - for i := 0; i < nItems; i++ { - doOp(t, st, truth, opSet, i) - } - st.Write() - - // delete every other item, starting from 1 - for i := 1; i < nItems; i += 2 { - doOp(t, st, truth, opDel, i) - assertIterateDomainCompare(t, st, truth) - } -} - -func TestCacheKVMergeIteratorChunks(t *testing.T) { - st := newCacheKVStore() - - // Use the truth to check values on the merge iterator - truth := dbm.NewMemDB() - - // sets to the parent - setRange(t, st, truth, 0, 20) - setRange(t, st, truth, 40, 60) - st.Write() - - // sets to the cache - setRange(t, st, truth, 20, 40) - setRange(t, st, truth, 60, 80) - assertIterateDomainCheck(t, st, truth, []keyRange{{0, 80}}) - - // remove some parents and some cache - deleteRange(t, st, truth, 15, 25) - assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 80}}) - - // remove some parents and some cache - deleteRange(t, st, truth, 35, 45) - assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {45, 80}}) - - // write, add more to the cache, and delete some cache - st.Write() - setRange(t, st, truth, 38, 42) - deleteRange(t, st, truth, 40, 43) - assertIterateDomainCheck(t, st, truth, []keyRange{{0, 15}, {25, 35}, {38, 40}, {45, 80}}) -} - -func TestCacheKVMergeIteratorDomain(t *testing.T) { - st := newCacheKVStore() - - itr := st.Iterator(nil, nil) - start, end := itr.Domain() - require.Equal(t, start, end) - require.NoError(t, itr.Close()) - - itr = st.Iterator(keyFmt(40), keyFmt(60)) - start, end = itr.Domain() - require.Equal(t, keyFmt(40), start) - require.Equal(t, keyFmt(60), end) - require.NoError(t, itr.Close()) - - start, end = st.ReverseIterator(keyFmt(0), keyFmt(80)).Domain() - require.Equal(t, keyFmt(0), start) - require.Equal(t, keyFmt(80), end) -} - -func TestCacheKVMergeIteratorRandom(t *testing.T) { - st := newCacheKVStore() - truth := dbm.NewMemDB() - - start, end := 25, 975 - max := 1000 - setRange(t, st, truth, start, end) - - // do an op, test the iterator - for i := 0; i < 2000; i++ { - doRandomOp(t, st, truth, max) - assertIterateDomainCompare(t, st, truth) - } -} - -func TestNilEndIterator(t *testing.T) { - const SIZE = 3000 - - tests := []struct { - name string - write bool - startIndex int - end []byte - }{ - {name: 
"write=false, end=nil", write: false, end: nil, startIndex: 1000}, - {name: "write=false, end=nil; full key scan", write: false, end: nil, startIndex: 2000}, - {name: "write=true, end=nil", write: true, end: nil, startIndex: 1000}, - {name: "write=false, end=non-nil", write: false, end: keyFmt(3000), startIndex: 1000}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - st := newCacheKVStore() - - for i := 0; i < SIZE; i++ { - kstr := keyFmt(i) - st.Set(kstr, valFmt(i)) - } - - if tt.write { - st.Write() - } - - itr := st.Iterator(keyFmt(tt.startIndex), tt.end) - i := tt.startIndex - j := 0 - for itr.Valid() { - require.Equal(t, keyFmt(i), itr.Key()) - require.Equal(t, valFmt(i), itr.Value()) - itr.Next() - i++ - j++ - } - - require.Equal(t, SIZE-tt.startIndex, j) - require.NoError(t, itr.Close()) - }) - } -} - -// TestIteratorDeadlock demonstrate the deadlock issue in cache store. -func TestIteratorDeadlock(t *testing.T) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - store := cachekv.NewStore(mem) - // the channel buffer is 64 and received once, so put at least 66 elements. - for i := 0; i < 66; i++ { - store.Set([]byte(fmt.Sprintf("key%d", i)), []byte{1}) - } - it := store.Iterator(nil, nil) - defer it.Close() - store.Set([]byte("key20"), []byte{1}) - // it'll be blocked here with previous version, or enable lock on btree. - it2 := store.Iterator(nil, nil) - defer it2.Close() -} - -//------------------------------------------------------------------------------------------- -// do some random ops - -const ( - opSet = 0 - opSetRange = 1 - opDel = 2 - opDelRange = 3 - opWrite = 4 - - totalOps = 5 // number of possible operations -) - -func randInt(n int) int { - return tmrand.NewRand().Int() % n -} - -// useful for replaying a error case if we find one -func doOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, op int, args ...int) { - switch op { - case opSet: - k := args[0] - st.Set(keyFmt(k), valFmt(k)) - err := truth.Set(keyFmt(k), valFmt(k)) - require.NoError(t, err) - case opSetRange: - start := args[0] - end := args[1] - setRange(t, st, truth, start, end) - case opDel: - k := args[0] - st.Delete(keyFmt(k)) - err := truth.Delete(keyFmt(k)) - require.NoError(t, err) - case opDelRange: - start := args[0] - end := args[1] - deleteRange(t, st, truth, start, end) - case opWrite: - st.Write() - } -} - -func doRandomOp(t *testing.T, st types.CacheKVStore, truth dbm.DB, maxKey int) { - r := randInt(totalOps) - switch r { - case opSet: - k := randInt(maxKey) - st.Set(keyFmt(k), valFmt(k)) - err := truth.Set(keyFmt(k), valFmt(k)) - require.NoError(t, err) - case opSetRange: - start := randInt(maxKey - 2) - end := randInt(maxKey-start) + start - setRange(t, st, truth, start, end) - case opDel: - k := randInt(maxKey) - st.Delete(keyFmt(k)) - err := truth.Delete(keyFmt(k)) - require.NoError(t, err) - case opDelRange: - start := randInt(maxKey - 2) - end := randInt(maxKey-start) + start - deleteRange(t, st, truth, start, end) - case opWrite: - st.Write() - } -} - -//------------------------------------------------------------------------------------------- - -// iterate over whole domain -func assertIterateDomain(t *testing.T, st types.KVStore, expectedN int) { - itr := st.Iterator(nil, nil) - i := 0 - for ; itr.Valid(); itr.Next() { - k, v := itr.Key(), itr.Value() - require.Equal(t, keyFmt(i), k) - require.Equal(t, valFmt(i), v) - i++ - } - require.Equal(t, expectedN, i) - require.NoError(t, itr.Close()) -} - -func assertIterateDomainCheck(t *testing.T, st 
types.KVStore, mem dbm.DB, r []keyRange) { - // iterate over each and check they match the other - itr := st.Iterator(nil, nil) - itr2, err := mem.Iterator(nil, nil) // ground truth - require.NoError(t, err) - - krc := newKeyRangeCounter(r) - i := 0 - - for ; krc.valid(); krc.next() { - require.True(t, itr.Valid()) - require.True(t, itr2.Valid()) - - // check the key/val matches the ground truth - k, v := itr.Key(), itr.Value() - k2, v2 := itr2.Key(), itr2.Value() - require.Equal(t, k, k2) - require.Equal(t, v, v2) - - // check they match the counter - require.Equal(t, k, keyFmt(krc.key())) - - itr.Next() - itr2.Next() - i++ - } - - require.False(t, itr.Valid()) - require.False(t, itr2.Valid()) - require.NoError(t, itr.Close()) - require.NoError(t, itr2.Close()) -} - -func assertIterateDomainCompare(t *testing.T, st types.KVStore, mem dbm.DB) { - // iterate over each and check they match the other - itr := st.Iterator(nil, nil) - itr2, err := mem.Iterator(nil, nil) // ground truth - require.NoError(t, err) - checkIterators(t, itr, itr2) - checkIterators(t, itr2, itr) - require.NoError(t, itr.Close()) - require.NoError(t, itr2.Close()) -} - -func checkIterators(t *testing.T, itr, itr2 types.Iterator) { - for ; itr.Valid(); itr.Next() { - require.True(t, itr2.Valid()) - k, v := itr.Key(), itr.Value() - k2, v2 := itr2.Key(), itr2.Value() - require.Equal(t, k, k2) - require.Equal(t, v, v2) - itr2.Next() - } - require.False(t, itr.Valid()) - require.False(t, itr2.Valid()) -} - -//-------------------------------------------------------- - -func setRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) { - for i := start; i < end; i++ { - st.Set(keyFmt(i), valFmt(i)) - err := mem.Set(keyFmt(i), valFmt(i)) - require.NoError(t, err) - } -} - -func deleteRange(t *testing.T, st types.KVStore, mem dbm.DB, start, end int) { - for i := start; i < end; i++ { - st.Delete(keyFmt(i)) - err := mem.Delete(keyFmt(i)) - require.NoError(t, err) - } -} - -//-------------------------------------------------------- - -type keyRange struct { - start int - end int -} - -func (kr keyRange) len() int { - return kr.end - kr.start -} - -func newKeyRangeCounter(kr []keyRange) *keyRangeCounter { - return &keyRangeCounter{keyRanges: kr} -} - -// we can iterate over this and make sure our real iterators have all the right keys -type keyRangeCounter struct { - rangeIdx int - idx int - keyRanges []keyRange -} - -func (krc *keyRangeCounter) valid() bool { - maxRangeIdx := len(krc.keyRanges) - 1 - maxRange := krc.keyRanges[maxRangeIdx] - - // if we're not in the max range, we're valid - if krc.rangeIdx <= maxRangeIdx && - krc.idx < maxRange.len() { - return true - } - - return false -} - -func (krc *keyRangeCounter) next() { - thisKeyRange := krc.keyRanges[krc.rangeIdx] - if krc.idx == thisKeyRange.len()-1 { - krc.rangeIdx++ - krc.idx = 0 - } else { - krc.idx++ - } -} - -func (krc *keyRangeCounter) key() int { - thisKeyRange := krc.keyRanges[krc.rangeIdx] - return thisKeyRange.start + krc.idx -} - -//-------------------------------------------------------- - -func bz(s string) []byte { return []byte(s) } - -func BenchmarkCacheKVStoreGetNoKeyFound(b *testing.B) { - b.ReportAllocs() - st := newCacheKVStore() - b.ResetTimer() - // assumes b.N < 2**24 - for i := 0; i < b.N; i++ { - st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}) - } -} - -func BenchmarkCacheKVStoreGetKeyFound(b *testing.B) { - b.ReportAllocs() - st := newCacheKVStore() - for i := 0; i < b.N; i++ { - arr := 
[]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)} - st.Set(arr, arr) - } - b.ResetTimer() - // assumes b.N < 2**24 - for i := 0; i < b.N; i++ { - st.Get([]byte{byte((i & 0xFF0000) >> 16), byte((i & 0xFF00) >> 8), byte(i & 0xFF)}) - } -} - -//-------------------------------------------------------- - -func BenchmarkCacheKVStoreSetAndCommit(b *testing.B) { - mem := dbadapter.Store{DB: dbm.NewMemDB()} - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - store := cachekv.NewStore(mem) - store1 := store.Clone() - for j := 0; j < 10; j++ { - store1.Set(sdk.Uint64ToBigEndian(uint64(i+j)), []byte{byte(i)}) - } - store.Restore(store1) - store.Write() - } -} diff --git a/store/cachemulti/store.go b/store/cachemulti/store.go deleted file mode 100644 index 2a78c8ad7c..0000000000 --- a/store/cachemulti/store.go +++ /dev/null @@ -1,191 +0,0 @@ -package cachemulti - -import ( - "fmt" - "io" - - "cosmossdk.io/store/tracekv" - "cosmossdk.io/store/types" - "github.com/evmos/ethermint/store/cachekv" -) - -// storeNameCtxKey is the TraceContext metadata key that identifies -// the store which emitted a given trace. -const storeNameCtxKey = "store_name" - -//---------------------------------------- -// Store - -// Store holds many branched stores. -// Implements MultiStore. -// NOTE: a Store (and MultiStores in general) should never expose the -// keys for the substores. -type Store struct { - stores map[types.StoreKey]*cachekv.Store - - traceWriter io.Writer - traceContext types.TraceContext -} - -var _ types.CacheMultiStore = Store{} - -// NewFromKVStore creates a new Store object from a mapping of store keys to -// CacheWrapper objects and a KVStore as the database. Each CacheWrapper store -// is a branched store. -func NewFromKVStore( - stores map[types.StoreKey]types.KVStore, - traceWriter io.Writer, traceContext types.TraceContext, -) Store { - cms := Store{ - stores: make(map[types.StoreKey]*cachekv.Store, len(stores)), - traceWriter: traceWriter, - traceContext: traceContext, - } - - for key, store := range stores { - if cms.TracingEnabled() { - tctx := cms.traceContext.Clone().Merge(types.TraceContext{ - storeNameCtxKey: key.Name(), - }) - - store = tracekv.NewStore(store, cms.traceWriter, tctx) - } - cms.stores[key] = cachekv.NewStore(store) - } - - return cms -} - -// NewStore creates a new Store object from parent rootmulti store, it branch out inner store of the specified keys. -func NewStore( - parent types.MultiStore, keys map[string]types.StoreKey, -) Store { - stores := make(map[types.StoreKey]types.KVStore, len(keys)) - for _, key := range keys { - stores[key] = parent.GetKVStore(key) - } - return NewFromKVStore(stores, nil, nil) -} - -func newCacheMultiStoreFromCMS(cms Store) Store { - stores := make(map[types.StoreKey]types.KVStore, len(cms.stores)) - for k, v := range cms.stores { - stores[k] = v - } - - return NewFromKVStore(stores, cms.traceWriter, cms.traceContext) -} - -// SetTracer sets the tracer for the MultiStore that the underlying -// stores will utilize to trace operations. A MultiStore is returned. -func (cms Store) SetTracer(w io.Writer) types.MultiStore { - cms.traceWriter = w - return cms -} - -// SetTracingContext updates the tracing context for the MultiStore by merging -// the given context with the existing context by key. Any existing keys will -// be overwritten. It is implied that the caller should update the context when -// necessary between tracing operations. It returns a modified MultiStore. 
-func (cms Store) SetTracingContext(tc types.TraceContext) types.MultiStore { - if cms.traceContext != nil { - for k, v := range tc { - cms.traceContext[k] = v - } - } else { - cms.traceContext = tc - } - - return cms -} - -// TracingEnabled returns if tracing is enabled for the MultiStore. -func (cms Store) TracingEnabled() bool { - return cms.traceWriter != nil -} - -// LatestVersion returns the branch version of the store -func (cms Store) LatestVersion() int64 { - panic("cannot get latest version from branch cached multi-store") -} - -// GetStoreType returns the type of the store. -func (cms Store) GetStoreType() types.StoreType { - return types.StoreTypeMulti -} - -// Write calls Write on each underlying store. -func (cms Store) Write() { - for _, store := range cms.stores { - store.Write() - } -} - -// Clone creates a snapshot of each store of the cache-multistore. -// Each copy is a copy-on-write operation and therefore is very fast. -func (cms Store) Clone() types.CacheMultiStore { - stores := make(map[types.StoreKey]*cachekv.Store, len(cms.stores)) - for key, store := range cms.stores { - stores[key] = store.Clone() - } - return Store{ - stores: stores, - - traceWriter: cms.traceWriter, - traceContext: cms.traceContext, - } -} - -// Restore restores the cache-multistore cache to a given snapshot. -func (cms Store) Restore(s types.CacheMultiStore) { - ms := s.(Store) - for key, store := range cms.stores { - otherStore, ok := ms.stores[key] - if !ok { - panic("Invariant violation: Restore should only be called on a store cloned from itself") - } - store.Restore(otherStore) - } -} - -// Implements CacheWrapper. -func (cms Store) CacheWrap() types.CacheWrap { - return cms.CacheMultiStore().(types.CacheWrap) -} - -// CacheWrapWithTrace implements the CacheWrapper interface. -func (cms Store) CacheWrapWithTrace(_ io.Writer, _ types.TraceContext) types.CacheWrap { - return cms.CacheWrap() -} - -// Implements MultiStore. -func (cms Store) CacheMultiStore() types.CacheMultiStore { - return newCacheMultiStoreFromCMS(cms) -} - -// CacheMultiStoreWithVersion implements the MultiStore interface. It will panic -// as an already cached multi-store cannot load previous versions. -// -// TODO: The store implementation can possibly be modified to support this as it -// seems safe to load previous versions (heights). -func (cms Store) CacheMultiStoreWithVersion(_ int64) (types.CacheMultiStore, error) { - panic("cannot branch cached multi-store with a version") -} - -// GetStore returns an underlying Store by key. -func (cms Store) GetStore(key types.StoreKey) types.Store { - s := cms.stores[key] - if key == nil || s == nil { - panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) - } - return types.Store(s) -} - -// GetKVStore returns an underlying KVStore by key. 
-func (cms Store) GetKVStore(key types.StoreKey) types.KVStore { - store := cms.stores[key] - if key == nil || store == nil { - panic(fmt.Sprintf("kv store with key %v has not been registered in stores", key)) - } - return types.KVStore(store) -} diff --git a/store/cachemulti/store_test.go b/store/cachemulti/store_test.go deleted file mode 100644 index 84d67cab91..0000000000 --- a/store/cachemulti/store_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package cachemulti - -import ( - "fmt" - "testing" - - "github.com/evmos/ethermint/store/cachekv" - "github.com/stretchr/testify/require" - - "cosmossdk.io/store/types" -) - -func TestStoreGetKVStore(t *testing.T) { - require := require.New(t) - - s := Store{stores: map[types.StoreKey]*cachekv.Store{}} - key := types.NewKVStoreKey("abc") - errMsg := fmt.Sprintf("kv store with key %v has not been registered in stores", key) - - require.PanicsWithValue(errMsg, - func() { s.GetStore(key) }) - - require.PanicsWithValue(errMsg, - func() { s.GetKVStore(key) }) -} diff --git a/x/evm/keeper/keeper.go b/x/evm/keeper/keeper.go index 299f8de643..08e2957ae4 100644 --- a/x/evm/keeper/keeper.go +++ b/x/evm/keeper/keeper.go @@ -74,10 +74,6 @@ type Keeper struct { hooks types.EvmHooks customContractFns []CustomContractFn - - // a set of store keys that should cover all the precompile use cases, - // or ideally just pass the application's all stores. - keys map[string]storetypes.StoreKey } // NewKeeper generates new evm module keeper @@ -92,7 +88,6 @@ func NewKeeper( fmk types.FeeMarketKeeper, tracer string, customContractFns []CustomContractFn, - keys map[string]storetypes.StoreKey, ) *Keeper { // ensure evm module account is set if addr := ak.GetModuleAddress(types.ModuleName); addr == nil { @@ -117,14 +112,9 @@ func NewKeeper( transientKey: transientKey, tracer: tracer, customContractFns: customContractFns, - keys: keys, } } -func (k Keeper) StoreKeys() map[string]storetypes.StoreKey { - return k.keys -} - // Logger returns a module-specific logger. 
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
 	sdkCtx := sdk.UnwrapSDKContext(ctx)
diff --git a/x/evm/statedb/interfaces.go b/x/evm/statedb/interfaces.go
index 95d3c8641b..c483b0bbdc 100644
--- a/x/evm/statedb/interfaces.go
+++ b/x/evm/statedb/interfaces.go
@@ -18,7 +18,6 @@ package statedb
 import (
 	"math/big"
 
-	storetypes "cosmossdk.io/store/types"
 	sdk "github.com/cosmos/cosmos-sdk/types"
 	"github.com/ethereum/go-ethereum/common"
 	evmtypes "github.com/evmos/ethermint/x/evm/types"
@@ -26,8 +25,6 @@ import (
 
 // Keeper provide underlying storage of StateDB
 type Keeper interface {
-	// for cache store wrapping
-	StoreKeys() map[string]storetypes.StoreKey
 	GetParams(sdk.Context) evmtypes.Params
 
 	AddBalance(ctx sdk.Context, addr sdk.AccAddress, coins sdk.Coins) error
diff --git a/x/evm/statedb/native.go b/x/evm/statedb/native.go
index 4abd4d9489..35cde15cf7 100644
--- a/x/evm/statedb/native.go
+++ b/x/evm/statedb/native.go
@@ -1,14 +1,14 @@
 package statedb
 
 import (
-	"cosmossdk.io/store/types"
+	"cosmossdk.io/store/cachemulti"
 	"github.com/ethereum/go-ethereum/common"
 )
 
 var _ JournalEntry = nativeChange{}
 
 type nativeChange struct {
-	snapshot types.MultiStore
+	snapshot cachemulti.Store
 	events   int
 }
 
@@ -17,6 +17,6 @@ func (native nativeChange) Dirtied() *common.Address {
 }
 
 func (native nativeChange) Revert(s *StateDB) {
-	s.restoreNativeState(native.snapshot)
+	s.revertNativeStateToSnapshot(native.snapshot)
 	s.nativeEvents = s.nativeEvents[:len(s.nativeEvents)-native.events]
 }
diff --git a/x/evm/statedb/statedb.go b/x/evm/statedb/statedb.go
index f4effb5e90..9422e7d5b6 100644
--- a/x/evm/statedb/statedb.go
+++ b/x/evm/statedb/statedb.go
@@ -22,15 +22,13 @@ import (
 	errorsmod "cosmossdk.io/errors"
 	sdkmath "cosmossdk.io/math"
-	storetypes "cosmossdk.io/store/types"
+	"cosmossdk.io/store/cachemulti"
 	sdk "github.com/cosmos/cosmos-sdk/types"
 	"github.com/ethereum/go-ethereum/common"
 	ethtypes "github.com/ethereum/go-ethereum/core/types"
 	"github.com/ethereum/go-ethereum/core/vm"
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/params"
-
-	"github.com/evmos/ethermint/store/cachemulti"
 )
 
 const StateDBContextKey = "statedb"
@@ -53,9 +51,19 @@ var _ vm.StateDB = &StateDB{}
 // * Contracts
 // * Accounts
 type StateDB struct {
-	keeper   Keeper
-	ctx      sdk.Context
-	cacheCtx sdk.Context
+	keeper Keeper
+	// origCtx is the context passed in by the caller
+	origCtx sdk.Context
+	// ctx is a branched context on top of the caller context
+	ctx sdk.Context
+	// cacheMS caches `ctx.MultiStore()` to avoid repeated type assertions; the multi-store is never replaced afterwards,
+	// since `ctx.WithMultiStore` is not called again once the StateDB is constructed.
+	cacheMS cachemulti.Store
+
+	// commitMS is the action that commits the native state; there are two cases:
+	// if the parent store is not a `cachemulti.Store`, we create a new one and call `Write` to commit, which only happens in unit tests;
+	// if the parent store is already a `cachemulti.Store`, we branch it and call `Restore` to commit.
+	commitMS func()
 
 	// Journal of state modifications. This is the backbone of
 	// Snapshot and RevertToSnapshot.
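Illustration (not part of this patch): how the journal consumes these snapshots. `Clone` takes a cheap copy-on-write branch of the `cachemulti.Store` and `Restore` rolls the live store back to it; the `nativeChange` entry layout follows native.go above, while the call sites here are a sketch and may differ from the actual code:

	snapshot := s.cacheMS.Clone() // copy-on-write branch, cheap to take per native action
	s.journal.append(nativeChange{snapshot: snapshot, events: len(converted)})
	// ...
	// on RevertToSnapshot, nativeChange.Revert undoes the native writes:
	s.cacheMS.Restore(entry.snapshot) // drops all writes made after the snapshot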
@@ -93,8 +101,25 @@ func New(ctx sdk.Context, keeper Keeper, txConfig TxConfig) *StateDB {
 }
 
 func NewWithParams(ctx sdk.Context, keeper Keeper, txConfig TxConfig, evmDenom string) *StateDB {
+	var (
+		cacheMS  cachemulti.Store
+		commitMS func()
+	)
+	if parentCacheMS, ok := ctx.MultiStore().(cachemulti.Store); ok {
+		cacheMS = parentCacheMS.Clone()
+		commitMS = func() { parentCacheMS.Restore(cacheMS) }
+	} else {
+		// in unit tests, it could run on an uncached multistore
+		if cacheMS, ok = ctx.MultiStore().CacheWrap().(cachemulti.Store); !ok {
+			panic("expect the CacheWrap result to be cachemulti.Store")
+		}
+		commitMS = cacheMS.Write
+	}
 	db := &StateDB{
+		origCtx:      ctx,
 		keeper:       keeper,
+		cacheMS:      cacheMS,
+		commitMS:     commitMS,
 		stateObjects: make(map[common.Address]*stateObject),
 		journal:      newJournal(),
 		accessList:   newAccessList(),
@@ -104,8 +129,7 @@ func NewWithParams(ctx sdk.Context, keeper Keeper, txConfig TxConfig, evmDenom s
 		nativeEvents: sdk.Events{},
 		evmDenom:     evmDenom,
 	}
-	db.ctx = ctx.WithValue(StateDBContextKey, db)
-	db.cacheCtx = db.ctx.WithMultiStore(cachemulti.NewStore(ctx.MultiStore(), keeper.StoreKeys()))
+	db.ctx = ctx.WithValue(StateDBContextKey, db).WithMultiStore(cacheMS)
 	return db
 }
 
@@ -113,13 +137,6 @@ func (s *StateDB) NativeEvents() sdk.Events {
 	return s.nativeEvents
 }
 
-// cacheMultiStore cast the multistore to *cachemulti.Store.
-// invariant: the multistore must be a `cachemulti.Store`,
-// prove: it's set in constructor and only modified in `restoreNativeState` which keeps the invariant.
-func (s *StateDB) cacheMultiStore() cachemulti.Store {
-	return s.cacheCtx.MultiStore().(cachemulti.Store)
-}
-
 // Keeper returns the underlying `Keeper`
 func (s *StateDB) Keeper() Keeper {
 	return s.keeper
 }
@@ -173,7 +190,7 @@ func (s *StateDB) Empty(addr common.Address) bool {
 
 // GetBalance retrieves the balance from the given address or 0 if object not found
 func (s *StateDB) GetBalance(addr common.Address) *big.Int {
-	return s.keeper.GetBalance(s.cacheCtx, sdk.AccAddress(addr.Bytes()), s.evmDenom)
+	return s.keeper.GetBalance(s.ctx, sdk.AccAddress(addr.Bytes()), s.evmDenom)
 }
 
 // GetNonce returns the nonce of account, 0 if not exists.
@@ -329,24 +346,23 @@ func (s *StateDB) setStateObject(object *stateObject) {
 	s.stateObjects[object.Address()] = object
 }
 
-func (s *StateDB) cloneNativeState() storetypes.MultiStore {
-	return s.cacheMultiStore().Clone()
+func (s *StateDB) snapshotNativeState() cachemulti.Store {
+	return s.cacheMS.Clone()
 }
 
-func (s *StateDB) restoreNativeState(ms storetypes.MultiStore) {
-	manager := sdk.NewEventManager()
-	s.cacheCtx = s.cacheCtx.WithMultiStore(ms).WithEventManager(manager)
+func (s *StateDB) revertNativeStateToSnapshot(ms cachemulti.Store) {
+	s.cacheMS.Restore(ms)
 }
 
 // ExecuteNativeAction executes native action in isolate,
 // the writes will be revert when either the native action itself fail
 // or the wrapping message call reverted.
func (s *StateDB) ExecuteNativeAction(contract common.Address, converter EventConverter, action func(ctx sdk.Context) error) error {
-	snapshot := s.cloneNativeState()
+	snapshot := s.snapshotNativeState()
 	eventManager := sdk.NewEventManager()
-	if err := action(s.cacheCtx.WithEventManager(eventManager)); err != nil {
-		s.restoreNativeState(snapshot)
+	if err := action(s.ctx.WithEventManager(eventManager)); err != nil {
+		s.revertNativeStateToSnapshot(snapshot)
 		return err
 	}
 
@@ -357,9 +373,9 @@ func (s *StateDB) ExecuteNativeAction(contract common.Address, converter EventCo
 	return nil
 }
 
-// CacheContext returns a branched state context for executing read-only native actions.
-func (s *StateDB) CacheContext() sdk.Context {
-	return s.cacheCtx.WithMultiStore(s.cloneNativeState())
+// Context returns the current context for querying native state in precompiles.
+func (s *StateDB) Context() sdk.Context {
+	return s.ctx
 }
 
 /*
@@ -601,24 +617,25 @@ func (s *StateDB) Commit() error {
 
 	// commit the native cache store first,
 	// the states managed by precompiles and the other part of StateDB must not overlap.
-	s.cacheMultiStore().Write()
+	// after this point, only `origCtx` should be used.
+	s.commitMS()
 
 	if len(s.nativeEvents) > 0 {
-		s.ctx.EventManager().EmitEvents(s.nativeEvents)
+		s.origCtx.EventManager().EmitEvents(s.nativeEvents)
 	}
 
 	for _, addr := range s.journal.sortedDirties() {
 		obj := s.stateObjects[addr]
 		if obj.suicided {
-			if err := s.keeper.DeleteAccount(s.ctx, obj.Address()); err != nil {
+			if err := s.keeper.DeleteAccount(s.origCtx, obj.Address()); err != nil {
 				return errorsmod.Wrap(err, "failed to delete account")
 			}
 		} else {
 			codeDirty := obj.codeDirty()
 			if codeDirty && obj.code != nil {
-				s.keeper.SetCode(s.ctx, obj.CodeHash(), obj.code)
+				s.keeper.SetCode(s.origCtx, obj.CodeHash(), obj.code)
 			}
 			if codeDirty || obj.nonceDirty() {
-				if err := s.keeper.SetAccount(s.ctx, obj.Address(), obj.account); err != nil {
+				if err := s.keeper.SetAccount(s.origCtx, obj.Address(), obj.account); err != nil {
 					return errorsmod.Wrap(err, "failed to set account")
 				}
 			}
@@ -628,7 +645,7 @@ func (s *StateDB) Commit() error {
 			if value == obj.originStorage[key] {
 				continue
 			}
-			s.keeper.SetState(s.ctx, obj.Address(), key, value.Bytes())
+			s.keeper.SetState(s.origCtx, obj.Address(), key, value.Bytes())
 		}
 	}
 }
diff --git a/x/evm/statedb/statedb_test.go b/x/evm/statedb/statedb_test.go
index 5cae6986fe..3bebbc4195 100644
--- a/x/evm/statedb/statedb_test.go
+++ b/x/evm/statedb/statedb_test.go
@@ -767,6 +767,7 @@ func CollectContractStorage(db vm.StateDB, address common.Address) statedb.Stora
 
 var (
 	testStoreKeys     = storetypes.NewKVStoreKeys(authtypes.StoreKey, banktypes.StoreKey, evmtypes.StoreKey, "testnative")
+	testObjKeys       = storetypes.NewObjectStoreKeys(banktypes.ObjectStoreKey)
 	testTransientKeys = storetypes.NewTransientStoreKeys(evmtypes.TransientKey)
 	testMemKeys       = storetypes.NewMemoryStoreKeys(capabilitytypes.MemStoreKey)
 )
@@ -807,21 +808,12 @@ func newTestKeeper(t *testing.T, cms storetypes.MultiStore) (sdk.Context, *evmke
 	bankKeeper := bankkeeper.NewBaseKeeper(
 		appCodec,
 		runtime.NewKVStoreService(testStoreKeys[banktypes.StoreKey]),
+		testObjKeys[banktypes.ObjectStoreKey],
 		accountKeeper,
 		map[string]bool{},
 		authAddr,
 		log.NewNopLogger(),
 	)
-	allKeys := make(map[string]storetypes.StoreKey, len(testStoreKeys)+len(testTransientKeys)+len(testMemKeys))
-	for k, v := range testStoreKeys {
-		allKeys[k] = v
-	}
-	for k, v := range testTransientKeys {
-		allKeys[k] = v
-	}
-	for k, v := range testMemKeys {
-		allKeys[k] = v
-	}
evmKeeper := evmkeeper.NewKeeper( appCodec, runtime.NewKVStoreService(testStoreKeys[evmtypes.StoreKey]), @@ -829,7 +821,6 @@ func newTestKeeper(t *testing.T, cms storetypes.MultiStore) (sdk.Context, *evmke accountKeeper, bankKeeper, nil, nil, "", nil, - allKeys, ) ctx := sdk.NewContext(cms, tmproto.Header{}, false, log.NewNopLogger()) @@ -848,6 +839,9 @@ func setupTestEnv(t *testing.T) (storetypes.MultiStore, sdk.Context, *evmkeeper. for _, key := range testMemKeys { cms.MountStoreWithDB(key, storetypes.StoreTypeMemory, nil) } + for _, key := range testObjKeys { + cms.MountStoreWithDB(key, storetypes.StoreTypeObject, nil) + } require.NoError(t, cms.LoadLatestVersion()) ctx, keeper := newTestKeeper(t, cms)
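Illustration (not part of this patch): precompile-side usage of the reworked API. `ExecuteNativeAction`, `Context`, `Keeper`, and `GetParams` are taken from the statedb.go and interfaces.go diffs above; the bank keeper, addresses, and converter are assumed stand-ins:

	// native writes run against the branched cachemulti.Store; returning an
	// error restores the snapshot taken at the start of the action
	err := stateDB.ExecuteNativeAction(contractAddr, converter, func(ctx sdk.Context) error {
		return bankKeeper.SendCoins(ctx, fromAddr, toAddr, amount)
	})

	// read-only native queries can use the branched context directly:
	params := stateDB.Keeper().GetParams(stateDB.Context())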