From 59c1f0fcdef6a4a3c6fe182617d84106ea2246a1 Mon Sep 17 00:00:00 2001 From: Youngtaek Yoon Date: Wed, 27 Dec 2023 09:35:57 +0000 Subject: [PATCH] Remove baseapp --- baseapp/abci.go | 1324 ----------- baseapp/abci_test.go | 2355 ------------------- baseapp/abci_utils.go | 398 ---- baseapp/abci_utils_test.go | 379 --- baseapp/baseapp.go | 1131 --------- baseapp/baseapp_test.go | 766 ------ baseapp/block_gas_test.go | 257 -- baseapp/circuit.go | 8 - baseapp/genesis.go | 23 - baseapp/grpcrouter.go | 159 -- baseapp/grpcrouter_helpers.go | 64 - baseapp/grpcrouter_test.go | 223 -- baseapp/grpcserver.go | 102 - baseapp/info.go | 197 -- baseapp/internal/protocompat/protocompat.go | 222 -- baseapp/msg_service_router.go | 221 -- baseapp/msg_service_router_test.go | 202 -- baseapp/oe/optimistic_execution.go | 157 -- baseapp/oe/optimistic_execution_test.go | 34 - baseapp/options.go | 359 --- baseapp/params.go | 15 - baseapp/params_legacy.go | 150 -- baseapp/recovery.go | 80 - baseapp/recovery_test.go | 64 - baseapp/snapshot_test.go | 347 --- baseapp/state.go | 23 - baseapp/streaming.go | 112 - baseapp/streaming_test.go | 146 -- baseapp/test_helpers.go | 78 - baseapp/testutil/buf.gen.yaml | 5 - baseapp/testutil/buf.lock | 23 - baseapp/testutil/buf.yaml | 5 - baseapp/testutil/messages.go | 65 - baseapp/testutil/messages.pb.go | 1397 ----------- baseapp/testutil/messages.proto | 47 - baseapp/testutil/mock/mocks.go | 235 -- baseapp/utils_test.go | 392 --- go.mod | 10 +- 38 files changed, 5 insertions(+), 11770 deletions(-) delete mode 100644 baseapp/abci.go delete mode 100644 baseapp/abci_test.go delete mode 100644 baseapp/abci_utils.go delete mode 100644 baseapp/abci_utils_test.go delete mode 100644 baseapp/baseapp.go delete mode 100644 baseapp/baseapp_test.go delete mode 100644 baseapp/block_gas_test.go delete mode 100644 baseapp/circuit.go delete mode 100644 baseapp/genesis.go delete mode 100644 baseapp/grpcrouter.go delete mode 100644 baseapp/grpcrouter_helpers.go delete mode 100644 baseapp/grpcrouter_test.go delete mode 100644 baseapp/grpcserver.go delete mode 100644 baseapp/info.go delete mode 100644 baseapp/internal/protocompat/protocompat.go delete mode 100644 baseapp/msg_service_router.go delete mode 100644 baseapp/msg_service_router_test.go delete mode 100644 baseapp/oe/optimistic_execution.go delete mode 100644 baseapp/oe/optimistic_execution_test.go delete mode 100644 baseapp/options.go delete mode 100644 baseapp/params.go delete mode 100644 baseapp/params_legacy.go delete mode 100644 baseapp/recovery.go delete mode 100644 baseapp/recovery_test.go delete mode 100644 baseapp/snapshot_test.go delete mode 100644 baseapp/state.go delete mode 100644 baseapp/streaming.go delete mode 100644 baseapp/streaming_test.go delete mode 100644 baseapp/test_helpers.go delete mode 100644 baseapp/testutil/buf.gen.yaml delete mode 100644 baseapp/testutil/buf.lock delete mode 100644 baseapp/testutil/buf.yaml delete mode 100644 baseapp/testutil/messages.go delete mode 100644 baseapp/testutil/messages.pb.go delete mode 100644 baseapp/testutil/messages.proto delete mode 100644 baseapp/testutil/mock/mocks.go delete mode 100644 baseapp/utils_test.go diff --git a/baseapp/abci.go b/baseapp/abci.go deleted file mode 100644 index 8a4daf68a1..0000000000 --- a/baseapp/abci.go +++ /dev/null @@ -1,1324 +0,0 @@ -package baseapp - -import ( - "context" - "crypto/sha256" - "fmt" - "sort" - "strings" - "time" - - "github.com/cockroachdb/errors" - abci "github.com/cometbft/cometbft/abci/types" - cmtproto 
"github.com/cometbft/cometbft/proto/tendermint/types" - "github.com/cosmos/gogoproto/proto" - "google.golang.org/grpc/codes" - grpcstatus "google.golang.org/grpc/status" - - coreheader "cosmossdk.io/core/header" - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/store/rootmulti" - snapshottypes "cosmossdk.io/store/snapshots/types" - storetypes "cosmossdk.io/store/types" - - "github.com/cosmos/cosmos-sdk/codec" - "github.com/cosmos/cosmos-sdk/telemetry" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -// Supported ABCI Query prefixes and paths -const ( - QueryPathApp = "app" - QueryPathCustom = "custom" - QueryPathP2P = "p2p" - QueryPathStore = "store" - - QueryPathBroadcastTx = "/cosmos.tx.v1beta1.Service/BroadcastTx" -) - -func (app *BaseApp) InitChain(req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { - if req.ChainId != app.chainID { - return nil, fmt.Errorf("invalid chain-id on InitChain; expected: %s, got: %s", app.chainID, req.ChainId) - } - - // On a new chain, we consider the init chain block height as 0, even though - // req.InitialHeight is 1 by default. - initHeader := cmtproto.Header{ChainID: req.ChainId, Time: req.Time} - app.logger.Info("InitChain", "initialHeight", req.InitialHeight, "chainID", req.ChainId) - - // Set the initial height, which will be used to determine if we are proposing - // or processing the first block or not. - app.initialHeight = req.InitialHeight - if app.initialHeight == 0 { // If initial height is 0, set it to 1 - app.initialHeight = 1 - } - - // if req.InitialHeight is > 1, then we set the initial version on all stores - if req.InitialHeight > 1 { - initHeader.Height = req.InitialHeight - if err := app.cms.SetInitialVersion(req.InitialHeight); err != nil { - return nil, err - } - } - - // initialize states with a correct header - app.setState(execModeFinalize, initHeader) - app.setState(execModeCheck, initHeader) - - // Store the consensus params in the BaseApp's param store. Note, this must be - // done after the finalizeBlockState and context have been set as it's persisted - // to state. - if req.ConsensusParams != nil { - err := app.StoreConsensusParams(app.finalizeBlockState.ctx, *req.ConsensusParams) - if err != nil { - return nil, err - } - } - - defer func() { - // InitChain represents the state of the application BEFORE the first block, - // i.e. the genesis block. This means that when processing the app's InitChain - // handler, the block height is zero by default. However, after Commit is called - // the height needs to reflect the true block height. - initHeader.Height = req.InitialHeight - app.checkState.ctx = app.checkState.ctx.WithBlockHeader(initHeader). - WithHeaderInfo(coreheader.Info{ - ChainID: req.ChainId, - Height: req.InitialHeight, - Time: req.Time, - }) - app.finalizeBlockState.ctx = app.finalizeBlockState.ctx.WithBlockHeader(initHeader). 
- WithHeaderInfo(coreheader.Info{ - ChainID: req.ChainId, - Height: req.InitialHeight, - Time: req.Time, - }) - }() - - if app.initChainer == nil { - return &abci.ResponseInitChain{}, nil - } - - // add block gas meter for any genesis transactions (allow infinite gas) - app.finalizeBlockState.ctx = app.finalizeBlockState.ctx.WithBlockGasMeter(storetypes.NewInfiniteGasMeter()) - - res, err := app.initChainer(app.finalizeBlockState.ctx, req) - if err != nil { - return nil, err - } - - if len(req.Validators) > 0 { - if len(req.Validators) != len(res.Validators) { - return nil, fmt.Errorf( - "len(RequestInitChain.Validators) != len(GenesisValidators) (%d != %d)", - len(req.Validators), len(res.Validators), - ) - } - - sort.Sort(abci.ValidatorUpdates(req.Validators)) - sort.Sort(abci.ValidatorUpdates(res.Validators)) - - for i := range res.Validators { - if !proto.Equal(&res.Validators[i], &req.Validators[i]) { - return nil, fmt.Errorf("genesisValidators[%d] != req.Validators[%d] ", i, i) - } - } - } - - // In the case of a new chain, AppHash will be the hash of an empty string. - // During an upgrade, it'll be the hash of the last committed block. - var appHash []byte - if !app.LastCommitID().IsZero() { - appHash = app.LastCommitID().Hash - } else { - // $ echo -n '' | sha256sum - // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - emptyHash := sha256.Sum256([]byte{}) - appHash = emptyHash[:] - } - - // NOTE: We don't commit, but FinalizeBlock for block InitialHeight starts from - // this FinalizeBlockState. - return &abci.ResponseInitChain{ - ConsensusParams: res.ConsensusParams, - Validators: res.Validators, - AppHash: appHash, - }, nil -} - -func (app *BaseApp) Info(req *abci.RequestInfo) (*abci.ResponseInfo, error) { - lastCommitID := app.cms.LastCommitID() - - return &abci.ResponseInfo{ - Data: app.name, - Version: app.version, - AppVersion: app.appVersion, - LastBlockHeight: lastCommitID.Version, - LastBlockAppHash: lastCommitID.Hash, - }, nil -} - -// Query implements the ABCI interface. It delegates to CommitMultiStore if it -// implements Queryable. 
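The Query handler defined next routes gRPC, "/app", "/store" and "/p2p" paths. As a minimal caller-side sketch, assuming an already-wired *baseapp.BaseApp (the store name and helper are placeholders, not part of the SDK), a "/store" query at the latest committed height looks like this:

package sketch

import (
	"context"
	"fmt"

	abci "github.com/cometbft/cometbft/abci/types"

	"github.com/cosmos/cosmos-sdk/baseapp"
)

// queryStoreKey fetches the value stored under key in the "main" KVStore at the
// latest committed height (a zero Height is replaced with the latest height).
func queryStoreKey(app *baseapp.BaseApp, key []byte) ([]byte, error) {
	res, err := app.Query(context.TODO(), &abci.RequestQuery{
		Path: "/store/main/key", // placeholder store name; no proof requested
		Data: key,
	})
	if err != nil {
		return nil, err
	}
	if res.Code != abci.CodeTypeOK {
		return nil, fmt.Errorf("query failed: %s", res.Log)
	}
	return res.Value, nil
}
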
-func (app *BaseApp) Query(_ context.Context, req *abci.RequestQuery) (resp *abci.ResponseQuery, err error) { - // add panic recovery for all queries - // - // Ref: https://github.com/cosmos/cosmos-sdk/pull/8039 - defer func() { - if r := recover(); r != nil { - resp = sdkerrors.QueryResult(errorsmod.Wrapf(sdkerrors.ErrPanic, "%v", r), app.trace) - } - }() - - // when a client did not provide a query height, manually inject the latest - if req.Height == 0 { - req.Height = app.LastBlockHeight() - } - - telemetry.IncrCounter(1, "query", "count") - telemetry.IncrCounter(1, "query", req.Path) - defer telemetry.MeasureSince(time.Now(), req.Path) - - if req.Path == QueryPathBroadcastTx { - return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "can't route a broadcast tx message"), app.trace), nil - } - - // handle gRPC routes first rather than calling splitPath because '/' characters - // are used as part of gRPC paths - if grpcHandler := app.grpcQueryRouter.Route(req.Path); grpcHandler != nil { - return app.handleQueryGRPC(grpcHandler, req), nil - } - - path := SplitABCIQueryPath(req.Path) - if len(path) == 0 { - return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "no query path provided"), app.trace), nil - } - - switch path[0] { - case QueryPathApp: - // "/app" prefix for special application queries - resp = handleQueryApp(app, path, req) - - case QueryPathStore: - resp = handleQueryStore(app, path, *req) - - case QueryPathP2P: - resp = handleQueryP2P(app, path) - - default: - resp = sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "unknown query path"), app.trace) - } - - return resp, nil -} - -// ListSnapshots implements the ABCI interface. It delegates to app.snapshotManager if set. -func (app *BaseApp) ListSnapshots(req *abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) { - resp := &abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{}} - if app.snapshotManager == nil { - return resp, nil - } - - snapshots, err := app.snapshotManager.List() - if err != nil { - app.logger.Error("failed to list snapshots", "err", err) - return nil, err - } - - for _, snapshot := range snapshots { - abciSnapshot, err := snapshot.ToABCI() - if err != nil { - app.logger.Error("failed to convert ABCI snapshots", "err", err) - return nil, err - } - - resp.Snapshots = append(resp.Snapshots, &abciSnapshot) - } - - return resp, nil -} - -// LoadSnapshotChunk implements the ABCI interface. It delegates to app.snapshotManager if set. -func (app *BaseApp) LoadSnapshotChunk(req *abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) { - if app.snapshotManager == nil { - return &abci.ResponseLoadSnapshotChunk{}, nil - } - - chunk, err := app.snapshotManager.LoadChunk(req.Height, req.Format, req.Chunk) - if err != nil { - app.logger.Error( - "failed to load snapshot chunk", - "height", req.Height, - "format", req.Format, - "chunk", req.Chunk, - "err", err, - ) - return nil, err - } - - return &abci.ResponseLoadSnapshotChunk{Chunk: chunk}, nil -} - -// OfferSnapshot implements the ABCI interface. It delegates to app.snapshotManager if set. 
-func (app *BaseApp) OfferSnapshot(req *abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error) { - if app.snapshotManager == nil { - app.logger.Error("snapshot manager not configured") - return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil - } - - if req.Snapshot == nil { - app.logger.Error("received nil snapshot") - return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil - } - - snapshot, err := snapshottypes.SnapshotFromABCI(req.Snapshot) - if err != nil { - app.logger.Error("failed to decode snapshot metadata", "err", err) - return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil - } - - err = app.snapshotManager.Restore(snapshot) - switch { - case err == nil: - return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil - - case errors.Is(err, snapshottypes.ErrUnknownFormat): - return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil - - case errors.Is(err, snapshottypes.ErrInvalidMetadata): - app.logger.Error( - "rejecting invalid snapshot", - "height", req.Snapshot.Height, - "format", req.Snapshot.Format, - "err", err, - ) - return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil - - default: - app.logger.Error( - "failed to restore snapshot", - "height", req.Snapshot.Height, - "format", req.Snapshot.Format, - "err", err, - ) - - // We currently don't support resetting the IAVL stores and retrying a - // different snapshot, so we ask CometBFT to abort all snapshot restoration. - return &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil - } -} - -// ApplySnapshotChunk implements the ABCI interface. It delegates to app.snapshotManager if set. -func (app *BaseApp) ApplySnapshotChunk(req *abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) { - if app.snapshotManager == nil { - app.logger.Error("snapshot manager not configured") - return &abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ABORT}, nil - } - - _, err := app.snapshotManager.RestoreChunk(req.Chunk) - switch { - case err == nil: - return &abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil - - case errors.Is(err, snapshottypes.ErrChunkHashMismatch): - app.logger.Error( - "chunk checksum mismatch; rejecting sender and requesting refetch", - "chunk", req.Index, - "sender", req.Sender, - "err", err, - ) - return &abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_RETRY, - RefetchChunks: []uint32{req.Index}, - RejectSenders: []string{req.Sender}, - }, nil - - default: - app.logger.Error("failed to restore snapshot", "err", err) - return &abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ABORT}, nil - } -} - -// CheckTx implements the ABCI interface and executes a tx in CheckTx mode. In -// CheckTx mode, messages are not executed. This means messages are only validated -// and only the AnteHandler is executed. State is persisted to the BaseApp's -// internal CheckTx state if the AnteHandler passes. Otherwise, the ResponseCheckTx -// will contain relevant error information. Regardless of tx execution outcome, -// the ResponseCheckTx will contain relevant gas execution context. 
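CheckTx, defined next, only runs the AnteHandler and reports the gas context either way. A minimal caller-side sketch, assuming an app and an already-encoded transaction (the helper name is illustrative):

package sketch

import (
	"fmt"

	abci "github.com/cometbft/cometbft/abci/types"

	"github.com/cosmos/cosmos-sdk/baseapp"
)

// checkTxSketch submits txBytes through CheckTx and prints the gas context that
// is returned whether or not the AnteHandler accepted the transaction.
func checkTxSketch(app *baseapp.BaseApp, txBytes []byte) error {
	res, err := app.CheckTx(&abci.RequestCheckTx{Tx: txBytes, Type: abci.CheckTxType_New})
	if err != nil {
		return err // only returned for an unknown CheckTx type
	}
	fmt.Printf("code=%d gasWanted=%d gasUsed=%d\n", res.Code, res.GasWanted, res.GasUsed)
	return nil
}
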
-func (app *BaseApp) CheckTx(req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { - var mode execMode - - switch { - case req.Type == abci.CheckTxType_New: - mode = execModeCheck - - case req.Type == abci.CheckTxType_Recheck: - mode = execModeReCheck - - default: - return nil, fmt.Errorf("unknown RequestCheckTx type: %s", req.Type) - } - - gInfo, result, anteEvents, err := app.runTx(mode, req.Tx) - if err != nil { - return sdkerrors.ResponseCheckTxWithEvents(err, gInfo.GasWanted, gInfo.GasUsed, anteEvents, app.trace), nil - } - - return &abci.ResponseCheckTx{ - GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints? - GasUsed: int64(gInfo.GasUsed), // TODO: Should type accept unsigned ints? - Log: result.Log, - Data: result.Data, - Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents), - }, nil -} - -// PrepareProposal implements the PrepareProposal ABCI method and returns a -// ResponsePrepareProposal object to the client. The PrepareProposal method is -// responsible for allowing the block proposer to perform application-dependent -// work in a block before proposing it. -// -// Transactions can be modified, removed, or added by the application. Since the -// application maintains its own local mempool, it will ignore the transactions -// provided to it in RequestPrepareProposal. Instead, it will determine which -// transactions to return based on the mempool's semantics and the MaxTxBytes -// provided by the client's request. -// -// Ref: https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-060-abci-1.0.md -// Ref: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_basic_concepts.md -func (app *BaseApp) PrepareProposal(req *abci.RequestPrepareProposal) (resp *abci.ResponsePrepareProposal, err error) { - if app.prepareProposal == nil { - return nil, errors.New("PrepareProposal handler not set") - } - - // Always reset state given that PrepareProposal can timeout and be called - // again in a subsequent round. - header := cmtproto.Header{ - ChainID: app.chainID, - Height: req.Height, - Time: req.Time, - ProposerAddress: req.ProposerAddress, - NextValidatorsHash: req.NextValidatorsHash, - AppHash: app.LastCommitID().Hash, - } - app.setState(execModePrepareProposal, header) - - // CometBFT must never call PrepareProposal with a height of 0. - // - // Ref: https://github.com/cometbft/cometbft/blob/059798a4f5b0c9f52aa8655fa619054a0154088c/spec/core/state.md?plain=1#L37-L38 - if req.Height < 1 { - return nil, errors.New("PrepareProposal called with invalid height") - } - - app.prepareProposalState.ctx = app.getContextForProposal(app.prepareProposalState.ctx, req.Height). - WithVoteInfos(toVoteInfo(req.LocalLastCommit.Votes)). // this is a set of votes that are not finalized yet, wait for commit - WithBlockHeight(req.Height). - WithBlockTime(req.Time). - WithProposer(req.ProposerAddress). - WithExecMode(sdk.ExecModePrepareProposal). - WithCometInfo(prepareProposalInfo{req}). - WithHeaderInfo(coreheader.Info{ - ChainID: app.chainID, - Height: req.Height, - Time: req.Time, - }) - - app.prepareProposalState.ctx = app.prepareProposalState.ctx. - WithConsensusParams(app.GetConsensusParams(app.prepareProposalState.ctx)). 
- WithBlockGasMeter(app.getBlockGasMeter(app.prepareProposalState.ctx)) - - defer func() { - if err := recover(); err != nil { - app.logger.Error( - "panic recovered in PrepareProposal", - "height", req.Height, - "time", req.Time, - "panic", err, - ) - - resp = &abci.ResponsePrepareProposal{Txs: req.Txs} - } - }() - - resp, err = app.prepareProposal(app.prepareProposalState.ctx, req) - if err != nil { - app.logger.Error("failed to prepare proposal", "height", req.Height, "time", req.Time, "err", err) - return &abci.ResponsePrepareProposal{Txs: req.Txs}, nil - } - - return resp, nil -} - -// ProcessProposal implements the ProcessProposal ABCI method and returns a -// ResponseProcessProposal object to the client. The ProcessProposal method is -// responsible for allowing execution of application-dependent work in a proposed -// block. Note, the application defines the exact implementation details of -// ProcessProposal. In general, the application must at the very least ensure -// that all transactions are valid. If all transactions are valid, then we inform -// CometBFT that the Status is ACCEPT. However, the application is also able -// to implement optimizations such as executing the entire proposed block -// immediately. -// -// If a panic is detected during execution of an application's ProcessProposal -// handler, it will be recovered and we will reject the proposal. -// -// Ref: https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-060-abci-1.0.md -// Ref: https://github.com/cometbft/cometbft/blob/main/spec/abci/abci%2B%2B_basic_concepts.md -func (app *BaseApp) ProcessProposal(req *abci.RequestProcessProposal) (resp *abci.ResponseProcessProposal, err error) { - if app.processProposal == nil { - return nil, errors.New("ProcessProposal handler not set") - } - - // CometBFT must never call ProcessProposal with a height of 0. - // Ref: https://github.com/cometbft/cometbft/blob/059798a4f5b0c9f52aa8655fa619054a0154088c/spec/core/state.md?plain=1#L37-L38 - if req.Height < 1 { - return nil, errors.New("ProcessProposal called with invalid height") - } - - // Always reset state given that ProcessProposal can timeout and be called - // again in a subsequent round. - header := cmtproto.Header{ - ChainID: app.chainID, - Height: req.Height, - Time: req.Time, - ProposerAddress: req.ProposerAddress, - NextValidatorsHash: req.NextValidatorsHash, - AppHash: app.LastCommitID().Hash, - } - app.setState(execModeProcessProposal, header) - - // Since the application can get access to FinalizeBlock state and write to it, - // we must be sure to reset it in case ProcessProposal timeouts and is called - // again in a subsequent round. However, we only want to do this after we've - // processed the first block, as we want to avoid overwriting the finalizeState - // after state changes during InitChain. - if req.Height > app.initialHeight { - // abort any running OE - app.optimisticExec.Abort() - app.setState(execModeFinalize, header) - } - - app.processProposalState.ctx = app.getContextForProposal(app.processProposalState.ctx, req.Height). - WithVoteInfos(req.ProposedLastCommit.Votes). // this is a set of votes that are not finalized yet, wait for commit - WithBlockHeight(req.Height). - WithBlockTime(req.Time). - WithHeaderHash(req.Hash). - WithProposer(req.ProposerAddress). - WithCometInfo(cometInfo{ProposerAddress: req.ProposerAddress, ValidatorsHash: req.NextValidatorsHash, Misbehavior: req.Misbehavior, LastCommit: req.ProposedLastCommit}). - WithExecMode(sdk.ExecModeProcessProposal). 
- WithHeaderInfo(coreheader.Info{ - ChainID: app.chainID, - Height: req.Height, - Time: req.Time, - }) - - app.processProposalState.ctx = app.processProposalState.ctx. - WithConsensusParams(app.GetConsensusParams(app.processProposalState.ctx)). - WithBlockGasMeter(app.getBlockGasMeter(app.processProposalState.ctx)) - - defer func() { - if err := recover(); err != nil { - app.logger.Error( - "panic recovered in ProcessProposal", - "height", req.Height, - "time", req.Time, - "hash", fmt.Sprintf("%X", req.Hash), - "panic", err, - ) - resp = &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT} - } - }() - - resp, err = app.processProposal(app.processProposalState.ctx, req) - if err != nil { - app.logger.Error("failed to process proposal", "height", req.Height, "time", req.Time, "hash", fmt.Sprintf("%X", req.Hash), "err", err) - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil - } - - // Only execute optimistic execution if the proposal is accepted, OE is - // enabled and the block height is greater than the initial height. During - // the first block we'll be carrying state from InitChain, so it would be - // impossible for us to easily revert. - // After the first block has been processed, the next blocks will get executed - // optimistically, so that when the ABCI client calls `FinalizeBlock` the app - // can have a response ready. - if resp.Status == abci.ResponseProcessProposal_ACCEPT && - app.optimisticExec.Enabled() && - req.Height > app.initialHeight { - app.optimisticExec.Execute(req) - } - - return resp, nil -} - -// ExtendVote implements the ExtendVote ABCI method and returns a ResponseExtendVote. -// It calls the application's ExtendVote handler which is responsible for performing -// application-specific business logic when sending a pre-commit for the NEXT -// block height. The extensions response may be non-deterministic but must always -// be returned, even if empty. -// -// Agreed upon vote extensions are made available to the proposer of the next -// height and are committed in the subsequent height, i.e. H+2. An error is -// returned if vote extensions are not enabled or if extendVote fails or panics. -func (app *BaseApp) ExtendVote(_ context.Context, req *abci.RequestExtendVote) (resp *abci.ResponseExtendVote, err error) { - // Always reset state given that ExtendVote and VerifyVoteExtension can timeout - // and be called again in a subsequent round. - var ctx sdk.Context - - // If we're extending the vote for the initial height, we need to use the - // finalizeBlockState context, otherwise we don't get the uncommitted data - // from InitChain. - if req.Height == app.initialHeight { - ctx, _ = app.finalizeBlockState.ctx.CacheContext() - } else { - emptyHeader := cmtproto.Header{ChainID: app.chainID, Height: req.Height} - ms := app.cms.CacheMultiStore() - ctx = sdk.NewContext(ms, emptyHeader, false, app.logger).WithStreamingManager(app.streamingManager) - } - - if app.extendVote == nil { - return nil, errors.New("application ExtendVote handler not set") - } - - // If vote extensions are not enabled, as a safety precaution, we return an - // error. - cp := app.GetConsensusParams(ctx) - - // Note: In this case, we do want to extend vote if the height is equal or - // greater than VoteExtensionsEnableHeight. This defers from the check done - // in ValidateVoteExtensions and PrepareProposal in which we'll check for - // vote extensions on VoteExtensionsEnableHeight+1. 
- extsEnabled := cp.Abci != nil && req.Height >= cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0 - if !extsEnabled { - return nil, fmt.Errorf("vote extensions are not enabled; unexpected call to ExtendVote at height %d", req.Height) - } - - ctx = ctx. - WithConsensusParams(cp). - WithBlockGasMeter(storetypes.NewInfiniteGasMeter()). - WithBlockHeight(req.Height). - WithHeaderHash(req.Hash). - WithExecMode(sdk.ExecModeVoteExtension). - WithHeaderInfo(coreheader.Info{ - ChainID: app.chainID, - Height: req.Height, - Hash: req.Hash, - }) - - // add a deferred recover handler in case extendVote panics - defer func() { - if r := recover(); r != nil { - app.logger.Error( - "panic recovered in ExtendVote", - "height", req.Height, - "hash", fmt.Sprintf("%X", req.Hash), - "panic", err, - ) - err = fmt.Errorf("recovered application panic in ExtendVote: %v", r) - } - }() - - resp, err = app.extendVote(ctx, req) - if err != nil { - app.logger.Error("failed to extend vote", "height", req.Height, "hash", fmt.Sprintf("%X", req.Hash), "err", err) - return &abci.ResponseExtendVote{VoteExtension: []byte{}}, nil - } - - return resp, err -} - -// VerifyVoteExtension implements the VerifyVoteExtension ABCI method and returns -// a ResponseVerifyVoteExtension. It calls the applications' VerifyVoteExtension -// handler which is responsible for performing application-specific business -// logic in verifying a vote extension from another validator during the pre-commit -// phase. The response MUST be deterministic. An error is returned if vote -// extensions are not enabled or if verifyVoteExt fails or panics. -func (app *BaseApp) VerifyVoteExtension(req *abci.RequestVerifyVoteExtension) (resp *abci.ResponseVerifyVoteExtension, err error) { - if app.verifyVoteExt == nil { - return nil, errors.New("application VerifyVoteExtension handler not set") - } - - var ctx sdk.Context - - // If we're verifying the vote for the initial height, we need to use the - // finalizeBlockState context, otherwise we don't get the uncommitted data - // from InitChain. - if req.Height == app.initialHeight { - ctx, _ = app.finalizeBlockState.ctx.CacheContext() - } else { - emptyHeader := cmtproto.Header{ChainID: app.chainID, Height: req.Height} - ms := app.cms.CacheMultiStore() - ctx = sdk.NewContext(ms, emptyHeader, false, app.logger).WithStreamingManager(app.streamingManager) - } - - // If vote extensions are not enabled, as a safety precaution, we return an - // error. - cp := app.GetConsensusParams(ctx) - - // Note: we verify votes extensions on VoteExtensionsEnableHeight+1. Check - // comment in ExtendVote and ValidateVoteExtensions for more details. 
- extsEnabled := cp.Abci != nil && req.Height >= cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0 - if !extsEnabled { - return nil, fmt.Errorf("vote extensions are not enabled; unexpected call to VerifyVoteExtension at height %d", req.Height) - } - - // add a deferred recover handler in case verifyVoteExt panics - defer func() { - if r := recover(); r != nil { - app.logger.Error( - "panic recovered in VerifyVoteExtension", - "height", req.Height, - "hash", fmt.Sprintf("%X", req.Hash), - "validator", fmt.Sprintf("%X", req.ValidatorAddress), - "panic", r, - ) - err = fmt.Errorf("recovered application panic in VerifyVoteExtension: %v", r) - } - }() - - resp, err = app.verifyVoteExt(ctx, req) - if err != nil { - app.logger.Error("failed to verify vote extension", "height", req.Height, "err", err) - return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_REJECT}, nil - } - - return resp, err -} - -// internalFinalizeBlock executes the block, called by the Optimistic -// Execution flow or by the FinalizeBlock ABCI method. The context received is -// only used to handle early cancellation, for anything related to state app.finalizeBlockState.ctx -// must be used. -func (app *BaseApp) internalFinalizeBlock(ctx context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - var events []abci.Event - - if err := app.checkHalt(req.Height, req.Time); err != nil { - return nil, err - } - - if err := app.validateFinalizeBlockHeight(req); err != nil { - return nil, err - } - - if app.cms.TracingEnabled() { - app.cms.SetTracingContext(storetypes.TraceContext( - map[string]any{"blockHeight": req.Height}, - )) - } - - header := cmtproto.Header{ - ChainID: app.chainID, - Height: req.Height, - Time: req.Time, - ProposerAddress: req.ProposerAddress, - NextValidatorsHash: req.NextValidatorsHash, - AppHash: app.LastCommitID().Hash, - } - - // finalizeBlockState should be set on InitChain or ProcessProposal. If it is - // nil, it means we are replaying this block and we need to set the state here - // given that during block replay ProcessProposal is not executed by CometBFT. - if app.finalizeBlockState == nil { - app.setState(execModeFinalize, header) - } - - // Context is now updated with Header information. - app.finalizeBlockState.ctx = app.finalizeBlockState.ctx. - WithBlockHeader(header). - WithHeaderHash(req.Hash). - WithHeaderInfo(coreheader.Info{ - ChainID: app.chainID, - Height: req.Height, - Time: req.Time, - Hash: req.Hash, - AppHash: app.LastCommitID().Hash, - }). - WithConsensusParams(app.GetConsensusParams(app.finalizeBlockState.ctx)). - WithVoteInfos(req.DecidedLastCommit.Votes). - WithExecMode(sdk.ExecModeFinalize). - WithCometInfo(cometInfo{ - Misbehavior: req.Misbehavior, - ValidatorsHash: req.NextValidatorsHash, - ProposerAddress: req.ProposerAddress, - LastCommit: req.DecidedLastCommit, - }) - - // GasMeter must be set after we get a context with updated consensus params. - gasMeter := app.getBlockGasMeter(app.finalizeBlockState.ctx) - app.finalizeBlockState.ctx = app.finalizeBlockState.ctx.WithBlockGasMeter(gasMeter) - - if app.checkState != nil { - app.checkState.ctx = app.checkState.ctx. - WithBlockGasMeter(gasMeter). 
- WithHeaderHash(req.Hash) - } - - if err := app.preBlock(req); err != nil { - return nil, err - } - - beginBlock, err := app.beginBlock(req) - if err != nil { - return nil, err - } - - // First check for an abort signal after beginBlock, as it's the first place - // we spend any significant amount of time. - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - // continue - } - - events = append(events, beginBlock.Events...) - - // Iterate over all raw transactions in the proposal and attempt to execute - // them, gathering the execution results. - // - // NOTE: Not all raw transactions may adhere to the sdk.Tx interface, e.g. - // vote extensions, so skip those. - txResults := make([]*abci.ExecTxResult, 0, len(req.Txs)) - for _, rawTx := range req.Txs { - var response *abci.ExecTxResult - - if _, err := app.txDecoder(rawTx); err == nil { - response = app.deliverTx(rawTx) - } else { - // In the case where a transaction included in a block proposal is malformed, - // we still want to return a default response to comet. This is because comet - // expects a response for each transaction included in a block proposal. - response = sdkerrors.ResponseExecTxResultWithEvents( - sdkerrors.ErrTxDecode, - 0, - 0, - nil, - false, - ) - } - - // check after every tx if we should abort - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - // continue - } - - txResults = append(txResults, response) - } - - if app.finalizeBlockState.ms.TracingEnabled() { - app.finalizeBlockState.ms = app.finalizeBlockState.ms.SetTracingContext(nil).(storetypes.CacheMultiStore) - } - - endBlock, err := app.endBlock(app.finalizeBlockState.ctx) - if err != nil { - return nil, err - } - - // check after endBlock if we should abort, to avoid propagating the result - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - // continue - } - - events = append(events, endBlock.Events...) - cp := app.GetConsensusParams(app.finalizeBlockState.ctx) - - return &abci.ResponseFinalizeBlock{ - Events: events, - TxResults: txResults, - ValidatorUpdates: endBlock.ValidatorUpdates, - ConsensusParamUpdates: &cp, - }, nil -} - -// FinalizeBlock will execute the block proposal provided by RequestFinalizeBlock. -// Specifically, it will execute an application's BeginBlock (if defined), followed -// by the transactions in the proposal, finally followed by the application's -// EndBlock (if defined). -// -// For each raw transaction, i.e. a byte slice, BaseApp will only execute it if -// it adheres to the sdk.Tx interface. Otherwise, the raw transaction will be -// skipped. This is to support compatibility with proposers injecting vote -// extensions into the proposal, which should not themselves be executed in cases -// where they adhere to the sdk.Tx interface. 
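FinalizeBlock, defined next, runs BeginBlock, the proposal's transactions and EndBlock, after which Commit persists the result. A minimal sketch of that call sequence from the consensus client's side, assuming the app and raw transactions come from elsewhere:

package sketch

import (
	abci "github.com/cometbft/cometbft/abci/types"

	"github.com/cosmos/cosmos-sdk/baseapp"
)

// finalizeAndCommit executes one block of raw transactions and then commits the
// resulting state, returning the app hash reported for the block.
func finalizeAndCommit(app *baseapp.BaseApp, height int64, txs [][]byte) ([]byte, error) {
	res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: height, Txs: txs})
	if err != nil {
		return nil, err
	}
	if _, err := app.Commit(); err != nil {
		return nil, err
	}
	return res.AppHash, nil
}
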
-func (app *BaseApp) FinalizeBlock(req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - if app.optimisticExec.Initialized() { - // check if the hash we got is the same as the one we are executing - aborted := app.optimisticExec.AbortIfNeeded(req.Hash) - // Wait for the OE to finish, regardless of whether it was aborted or not - res, err := app.optimisticExec.WaitResult() - - // only return if we are not aborting - if !aborted { - if res != nil { - res.AppHash = app.workingHash() - } - return res, err - } - - // if it was aborted, we need to reset the state - app.finalizeBlockState = nil - app.optimisticExec.Reset() - } - - // if no OE is running, just run the block (this is either a block replay or a OE that got aborted) - res, err := app.internalFinalizeBlock(context.Background(), req) - if res != nil { - res.AppHash = app.workingHash() - } - return res, err -} - -// checkHalt checkes if height or time exceeds halt-height or halt-time respectively. -func (app *BaseApp) checkHalt(height int64, time time.Time) error { - var halt bool - switch { - case app.haltHeight > 0 && uint64(height) > app.haltHeight: - halt = true - - case app.haltTime > 0 && time.Unix() > int64(app.haltTime): - halt = true - } - - if halt { - return fmt.Errorf("halt per configuration height %d time %d", app.haltHeight, app.haltTime) - } - - return nil -} - -// Commit implements the ABCI interface. It will commit all state that exists in -// the deliver state's multi-store and includes the resulting commit ID in the -// returned abci.ResponseCommit. Commit will set the check state based on the -// latest header and reset the deliver state. Also, if a non-zero halt height is -// defined in config, Commit will execute a deferred function call to check -// against that height and gracefully halt if it matches the latest committed -// height. -func (app *BaseApp) Commit() (*abci.ResponseCommit, error) { - header := app.finalizeBlockState.ctx.BlockHeader() - retainHeight := app.GetBlockRetentionHeight(header.Height) - - if app.precommiter != nil { - app.precommiter(app.finalizeBlockState.ctx) - } - - rms, ok := app.cms.(*rootmulti.Store) - if ok { - rms.SetCommitHeader(header) - } - - app.cms.Commit() - - resp := &abci.ResponseCommit{ - RetainHeight: retainHeight, - } - - abciListeners := app.streamingManager.ABCIListeners - if len(abciListeners) > 0 { - ctx := app.finalizeBlockState.ctx - blockHeight := ctx.BlockHeight() - changeSet := app.cms.PopStateCache() - - for _, abciListener := range abciListeners { - if err := abciListener.ListenCommit(ctx, *resp, changeSet); err != nil { - app.logger.Error("Commit listening hook failed", "height", blockHeight, "err", err) - } - } - } - - // Reset the CheckTx state to the latest committed. - // - // NOTE: This is safe because CometBFT holds a lock on the mempool for - // Commit. Use the header from this latest block. - app.setState(execModeCheck, header) - - app.finalizeBlockState = nil - - if app.prepareCheckStater != nil { - app.prepareCheckStater(app.checkState.ctx) - } - - // The SnapshotIfApplicable method will create the snapshot by starting the goroutine - app.snapshotManager.SnapshotIfApplicable(header.Height) - - return resp, nil -} - -// workingHash gets the apphash that will be finalized in commit. -// These writes will be persisted to the root multi-store (app.cms) and flushed to -// disk in the Commit phase. 
This means when the ABCI client requests Commit(), the application -// state transitions will be flushed to disk and as a result, but we already have -// an application Merkle root. -func (app *BaseApp) workingHash() []byte { - // Write the FinalizeBlock state into branched storage and commit the MultiStore. - // The write to the FinalizeBlock state writes all state transitions to the root - // MultiStore (app.cms) so when Commit() is called it persists those values. - app.finalizeBlockState.ms.Write() - - // Get the hash of all writes in order to return the apphash to the comet in finalizeBlock. - commitHash := app.cms.WorkingHash() - app.logger.Debug("hash of all writes", "workingHash", fmt.Sprintf("%X", commitHash)) - - return commitHash -} - -func handleQueryApp(app *BaseApp, path []string, req *abci.RequestQuery) *abci.ResponseQuery { - if len(path) >= 2 { - switch path[1] { - case "simulate": - txBytes := req.Data - - gInfo, res, err := app.Simulate(txBytes) - if err != nil { - return sdkerrors.QueryResult(errorsmod.Wrap(err, "failed to simulate tx"), app.trace) - } - - simRes := &sdk.SimulationResponse{ - GasInfo: gInfo, - Result: res, - } - - bz, err := codec.ProtoMarshalJSON(simRes, app.interfaceRegistry) - if err != nil { - return sdkerrors.QueryResult(errorsmod.Wrap(err, "failed to JSON encode simulation response"), app.trace) - } - - return &abci.ResponseQuery{ - Codespace: sdkerrors.RootCodespace, - Height: req.Height, - Value: bz, - } - - case "version": - return &abci.ResponseQuery{ - Codespace: sdkerrors.RootCodespace, - Height: req.Height, - Value: []byte(app.version), - } - - default: - return sdkerrors.QueryResult(errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "unknown query: %s", path), app.trace) - } - } - - return sdkerrors.QueryResult( - errorsmod.Wrap( - sdkerrors.ErrUnknownRequest, - "expected second parameter to be either 'simulate' or 'version', neither was present", - ), app.trace) -} - -func handleQueryStore(app *BaseApp, path []string, req abci.RequestQuery) *abci.ResponseQuery { - // "/store" prefix for store queries - queryable, ok := app.cms.(storetypes.Queryable) - if !ok { - return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "multi-store does not support queries"), app.trace) - } - - req.Path = "/" + strings.Join(path[1:], "/") - - if req.Height <= 1 && req.Prove { - return sdkerrors.QueryResult( - errorsmod.Wrap( - sdkerrors.ErrInvalidRequest, - "cannot query with proof when height <= 1; please provide a valid height", - ), app.trace) - } - - sdkReq := storetypes.RequestQuery(req) - resp, err := queryable.Query(&sdkReq) - if err != nil { - return sdkerrors.QueryResult(err, app.trace) - } - resp.Height = req.Height - - abciResp := abci.ResponseQuery(*resp) - - return &abciResp -} - -func handleQueryP2P(app *BaseApp, path []string) *abci.ResponseQuery { - // "/p2p" prefix for p2p queries - if len(path) < 4 { - return sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "path should be p2p filter "), app.trace) - } - - var resp *abci.ResponseQuery - - cmd, typ, arg := path[1], path[2], path[3] - switch cmd { - case "filter": - switch typ { - case "addr": - resp = app.FilterPeerByAddrPort(arg) - - case "id": - resp = app.FilterPeerByID(arg) - } - - default: - resp = sdkerrors.QueryResult(errorsmod.Wrap(sdkerrors.ErrUnknownRequest, "expected second parameter to be 'filter'"), app.trace) - } - - return resp -} - -// SplitABCIQueryPath splits a string path using the delimiter '/'. -// -// e.g. 
"this/is/funny" becomes []string{"this", "is", "funny"} -func SplitABCIQueryPath(requestPath string) (path []string) { - path = strings.Split(requestPath, "/") - - // first element is empty string - if len(path) > 0 && path[0] == "" { - path = path[1:] - } - - return path -} - -// FilterPeerByAddrPort filters peers by address/port. -func (app *BaseApp) FilterPeerByAddrPort(info string) *abci.ResponseQuery { - if app.addrPeerFilter != nil { - return app.addrPeerFilter(info) - } - - return &abci.ResponseQuery{} -} - -// FilterPeerByID filters peers by node ID. -func (app *BaseApp) FilterPeerByID(info string) *abci.ResponseQuery { - if app.idPeerFilter != nil { - return app.idPeerFilter(info) - } - - return &abci.ResponseQuery{} -} - -// getContextForProposal returns the correct Context for PrepareProposal and -// ProcessProposal. We use finalizeBlockState on the first block to be able to -// access any state changes made in InitChain. -func (app *BaseApp) getContextForProposal(ctx sdk.Context, height int64) sdk.Context { - if height == app.initialHeight { - ctx, _ = app.finalizeBlockState.ctx.CacheContext() - - // clear all context data set during InitChain to avoid inconsistent behavior - ctx = ctx.WithBlockHeader(cmtproto.Header{}).WithHeaderInfo(coreheader.Info{}) - return ctx - } - - return ctx -} - -func (app *BaseApp) handleQueryGRPC(handler GRPCQueryHandler, req *abci.RequestQuery) *abci.ResponseQuery { - ctx, err := app.CreateQueryContext(req.Height, req.Prove) - if err != nil { - return sdkerrors.QueryResult(err, app.trace) - } - - resp, err := handler(ctx, req) - if err != nil { - resp = sdkerrors.QueryResult(gRPCErrorToSDKError(err), app.trace) - resp.Height = req.Height - return resp - } - - return resp -} - -func gRPCErrorToSDKError(err error) error { - status, ok := grpcstatus.FromError(err) - if !ok { - return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error()) - } - - switch status.Code() { - case codes.NotFound: - return errorsmod.Wrap(sdkerrors.ErrKeyNotFound, err.Error()) - - case codes.InvalidArgument: - return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error()) - - case codes.FailedPrecondition: - return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, err.Error()) - - case codes.Unauthenticated: - return errorsmod.Wrap(sdkerrors.ErrUnauthorized, err.Error()) - - default: - return errorsmod.Wrap(sdkerrors.ErrUnknownRequest, err.Error()) - } -} - -func checkNegativeHeight(height int64) error { - if height < 0 { - return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "cannot query with height < 0; please provide a valid height") - } - - return nil -} - -// createQueryContext creates a new sdk.Context for a query, taking as args -// the block height and whether the query needs a proof or not. 
-func (app *BaseApp) CreateQueryContext(height int64, prove bool) (sdk.Context, error) { - if err := checkNegativeHeight(height); err != nil { - return sdk.Context{}, err - } - - // use custom query multi-store if provided - qms := app.qms - if qms == nil { - qms = app.cms.(storetypes.MultiStore) - } - - lastBlockHeight := qms.LatestVersion() - if lastBlockHeight == 0 { - return sdk.Context{}, errorsmod.Wrapf(sdkerrors.ErrInvalidHeight, "%s is not ready; please wait for first block", app.Name()) - } - - if height > lastBlockHeight { - return sdk.Context{}, - errorsmod.Wrap( - sdkerrors.ErrInvalidHeight, - "cannot query with height in the future; please provide a valid height", - ) - } - - // when a client did not provide a query height, manually inject the latest - if height == 0 { - height = lastBlockHeight - } - - if height <= 1 && prove { - return sdk.Context{}, - errorsmod.Wrap( - sdkerrors.ErrInvalidRequest, - "cannot query with proof when height <= 1; please provide a valid height", - ) - } - - cacheMS, err := qms.CacheMultiStoreWithVersion(height) - if err != nil { - return sdk.Context{}, - errorsmod.Wrapf( - sdkerrors.ErrInvalidRequest, - "failed to load state at height %d; %s (latest height: %d)", height, err, lastBlockHeight, - ) - } - - // branch the commit multi-store for safety - ctx := sdk.NewContext(cacheMS, app.checkState.ctx.BlockHeader(), true, app.logger). - WithMinGasPrices(app.minGasPrices). - WithBlockHeight(height). - WithGasMeter(storetypes.NewGasMeter(app.queryGasLimit)) - - if height != lastBlockHeight { - rms, ok := app.cms.(*rootmulti.Store) - if ok { - cInfo, err := rms.GetCommitInfo(height) - if cInfo != nil && err == nil { - ctx = ctx.WithBlockTime(cInfo.Timestamp) - } - } - } - - return ctx, nil -} - -// GetBlockRetentionHeight returns the height for which all blocks below this height -// are pruned from CometBFT. Given a commitment height and a non-zero local -// minRetainBlocks configuration, the retentionHeight is the smallest height that -// satisfies: -// -// - Unbonding (safety threshold) time: The block interval in which validators -// can be economically punished for misbehavior. Blocks in this interval must be -// auditable e.g. by the light client. -// -// - Logical store snapshot interval: The block interval at which the underlying -// logical store database is persisted to disk, e.g. every 10000 heights. Blocks -// since the last IAVL snapshot must be available for replay on application restart. -// -// - State sync snapshots: Blocks since the oldest available snapshot must be -// available for state sync nodes to catch up (oldest because a node may be -// restoring an old snapshot while a new snapshot was taken). -// -// - Local (minRetainBlocks) config: Archive nodes may want to retain more or -// all blocks, e.g. via a local config option min-retain-blocks. There may also -// be a need to vary retention for other nodes, e.g. sentry nodes which do not -// need historical blocks. -func (app *BaseApp) GetBlockRetentionHeight(commitHeight int64) int64 { - // pruning is disabled if minRetainBlocks is zero - if app.minRetainBlocks == 0 { - return 0 - } - - minNonZero := func(x, y int64) int64 { - switch { - case x == 0: - return y - - case y == 0: - return x - - case x < y: - return x - - default: - return y - } - } - - // Define retentionHeight as the minimum value that satisfies all non-zero - // constraints. All blocks below (commitHeight-retentionHeight) are pruned - // from CometBFT. 
- var retentionHeight int64 - - // Define the number of blocks needed to protect against misbehaving validators - // which allows light clients to operate safely. Note, we piggy back of the - // evidence parameters instead of computing an estimated number of blocks based - // on the unbonding period and block commitment time as the two should be - // equivalent. - cp := app.GetConsensusParams(app.finalizeBlockState.ctx) - if cp.Evidence != nil && cp.Evidence.MaxAgeNumBlocks > 0 { - retentionHeight = commitHeight - cp.Evidence.MaxAgeNumBlocks - } - - if app.snapshotManager != nil { - snapshotRetentionHeights := app.snapshotManager.GetSnapshotBlockRetentionHeights() - if snapshotRetentionHeights > 0 { - retentionHeight = minNonZero(retentionHeight, commitHeight-snapshotRetentionHeights) - } - } - - v := commitHeight - int64(app.minRetainBlocks) - retentionHeight = minNonZero(retentionHeight, v) - - if retentionHeight <= 0 { - // prune nothing in the case of a non-positive height - return 0 - } - - return retentionHeight -} - -// toVoteInfo converts the new ExtendedVoteInfo to VoteInfo. -func toVoteInfo(votes []abci.ExtendedVoteInfo) []abci.VoteInfo { - legacyVotes := make([]abci.VoteInfo, len(votes)) - for i, vote := range votes { - legacyVotes[i] = abci.VoteInfo{ - Validator: abci.Validator{ - Address: vote.Validator.Address, - Power: vote.Validator.Power, - }, - BlockIdFlag: vote.BlockIdFlag, - } - } - - return legacyVotes -} diff --git a/baseapp/abci_test.go b/baseapp/abci_test.go deleted file mode 100644 index 179c732acd..0000000000 --- a/baseapp/abci_test.go +++ /dev/null @@ -1,2355 +0,0 @@ -package baseapp_test - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "math/rand" - "strconv" - "strings" - "testing" - "time" - - abci "github.com/cometbft/cometbft/abci/types" - "github.com/cometbft/cometbft/crypto/secp256k1" - cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - dbm "github.com/cosmos/cosmos-db" - protoio "github.com/cosmos/gogoproto/io" - "github.com/cosmos/gogoproto/jsonpb" - "github.com/cosmos/gogoproto/proto" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/require" - - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/log" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/snapshots" - snapshottypes "cosmossdk.io/store/snapshots/types" - storetypes "cosmossdk.io/store/types" - - "github.com/cosmos/cosmos-sdk/baseapp" - baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" - "github.com/cosmos/cosmos-sdk/baseapp/testutil/mock" - "github.com/cosmos/cosmos-sdk/testutil" - "github.com/cosmos/cosmos-sdk/testutil/testdata" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/cosmos/cosmos-sdk/types/mempool" - "github.com/cosmos/cosmos-sdk/x/auth/signing" -) - -func TestABCI_Info(t *testing.T) { - suite := NewBaseAppSuite(t) - - reqInfo := abci.RequestInfo{} - res, err := suite.baseApp.Info(&reqInfo) - require.NoError(t, err) - - require.Equal(t, "", res.Version) - require.Equal(t, t.Name(), res.GetData()) - require.Equal(t, int64(0), res.LastBlockHeight) - require.Equal(t, []uint8(nil), res.LastBlockAppHash) - require.Equal(t, suite.baseApp.AppVersion(), res.AppVersion) -} - -func TestABCI_First_block_Height(t *testing.T) { - suite := NewBaseAppSuite(t, baseapp.SetChainID("test-chain-id")) - app := suite.baseApp - - _, err := 
app.InitChain(&abci.RequestInitChain{ - ChainId: "test-chain-id", - ConsensusParams: &cmtproto.ConsensusParams{Block: &cmtproto.BlockParams{MaxGas: 5000000}}, - InitialHeight: 1, - }) - require.NoError(t, err) - _, err = app.Commit() - require.NoError(t, err) - - ctx := app.GetContextForCheckTx(nil) - require.Equal(t, int64(1), ctx.BlockHeight()) -} - -func TestABCI_InitChain(t *testing.T) { - name := t.Name() - db := dbm.NewMemDB() - logger := log.NewTestLogger(t) - app := baseapp.NewBaseApp(name, logger, db, nil, baseapp.SetChainID("test-chain-id")) - - capKey := storetypes.NewKVStoreKey("main") - capKey2 := storetypes.NewKVStoreKey("key2") - app.MountStores(capKey, capKey2) - - // set a value in the store on init chain - key, value := []byte("hello"), []byte("goodbye") - var initChainer sdk.InitChainer = func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { - store := ctx.KVStore(capKey) - store.Set(key, value) - return &abci.ResponseInitChain{}, nil - } - - query := abci.RequestQuery{ - Path: "/store/main/key", - Data: key, - } - - // initChain is nil and chain ID is wrong - errors - _, err := app.InitChain(&abci.RequestInitChain{ChainId: "wrong-chain-id"}) - require.Error(t, err) - - // initChain is nil - nothing happens - _, err = app.InitChain(&abci.RequestInitChain{ChainId: "test-chain-id"}) - require.NoError(t, err) - resQ, err := app.Query(context.TODO(), &query) - require.NoError(t, err) - require.Equal(t, 0, len(resQ.Value)) - - // set initChainer and try again - should see the value - app.SetInitChainer(initChainer) - - // stores are mounted and private members are set - sealing baseapp - err = app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - require.Equal(t, int64(0), app.LastBlockHeight()) - - initChainRes, err := app.InitChain(&abci.RequestInitChain{AppStateBytes: []byte("{}"), ChainId: "test-chain-id"}) // must have valid JSON genesis file, even if empty - require.NoError(t, err) - - // The AppHash returned by a new chain is the sha256 hash of "". 
- // $ echo -n '' | sha256sum - // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - apphash, err := hex.DecodeString("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") - require.NoError(t, err) - - require.Equal( - t, - apphash, - initChainRes.AppHash, - ) - - // assert that chainID is set correctly in InitChain - chainID := getFinalizeBlockStateCtx(app).ChainID() - require.Equal(t, "test-chain-id", chainID, "ChainID in deliverState not set correctly in InitChain") - - chainID = getCheckStateCtx(app).ChainID() - require.Equal(t, "test-chain-id", chainID, "ChainID in checkState not set correctly in InitChain") - - _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{ - Hash: initChainRes.AppHash, - Height: 1, - }) - require.NoError(t, err) - - _, err = app.Commit() - require.NoError(t, err) - - resQ, err = app.Query(context.TODO(), &query) - require.NoError(t, err) - require.Equal(t, int64(1), app.LastBlockHeight()) - require.Equal(t, value, resQ.Value) - - // reload app - app = baseapp.NewBaseApp(name, logger, db, nil) - app.SetInitChainer(initChainer) - app.MountStores(capKey, capKey2) - err = app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - require.Equal(t, int64(1), app.LastBlockHeight()) - - // ensure we can still query after reloading - resQ, err = app.Query(context.TODO(), &query) - require.NoError(t, err) - require.Equal(t, value, resQ.Value) - - // commit and ensure we can still query - _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: app.LastBlockHeight() + 1}) - require.NoError(t, err) - _, err = app.Commit() - require.NoError(t, err) - - resQ, err = app.Query(context.TODO(), &query) - require.NoError(t, err) - require.Equal(t, value, resQ.Value) -} - -func TestABCI_InitChain_WithInitialHeight(t *testing.T) { - name := t.Name() - db := dbm.NewMemDB() - app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) - - _, err := app.InitChain( - &abci.RequestInitChain{ - InitialHeight: 3, - }, - ) - require.NoError(t, err) - _, err = app.Commit() - require.NoError(t, err) - - require.Equal(t, int64(3), app.LastBlockHeight()) -} - -func TestABCI_FinalizeBlock_WithInitialHeight(t *testing.T) { - name := t.Name() - db := dbm.NewMemDB() - app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) - - _, err := app.InitChain( - &abci.RequestInitChain{ - InitialHeight: 3, - }, - ) - require.NoError(t, err) - - _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 4}) - require.Error(t, err, "invalid height: 4; expected: 3") - - _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 3}) - require.NoError(t, err) - _, err = app.Commit() - require.NoError(t, err) - - require.Equal(t, int64(3), app.LastBlockHeight()) -} - -func TestABCI_FinalizeBlock_WithBeginAndEndBlocker(t *testing.T) { - name := t.Name() - db := dbm.NewMemDB() - app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) - - app.SetBeginBlocker(func(ctx sdk.Context) (sdk.BeginBlock, error) { - return sdk.BeginBlock{ - Events: []abci.Event{ - { - Type: "sometype", - Attributes: []abci.EventAttribute{ - { - Key: "foo", - Value: "bar", - }, - }, - }, - }, - }, nil - }) - - app.SetEndBlocker(func(ctx sdk.Context) (sdk.EndBlock, error) { - return sdk.EndBlock{ - Events: []abci.Event{ - { - Type: "anothertype", - Attributes: []abci.EventAttribute{ - { - Key: "foo", - Value: "bar", - }, - }, - }, - }, - }, nil - }) - - _, err := app.InitChain( - &abci.RequestInitChain{ - InitialHeight: 1, - }, - ) - require.NoError(t, err) - - 
res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) - require.NoError(t, err) - - require.Len(t, res.Events, 2) - - require.Equal(t, "sometype", res.Events[0].Type) - require.Equal(t, "foo", res.Events[0].Attributes[0].Key) - require.Equal(t, "bar", res.Events[0].Attributes[0].Value) - require.Equal(t, "mode", res.Events[0].Attributes[1].Key) - require.Equal(t, "BeginBlock", res.Events[0].Attributes[1].Value) - - require.Equal(t, "anothertype", res.Events[1].Type) - require.Equal(t, "foo", res.Events[1].Attributes[0].Key) - require.Equal(t, "bar", res.Events[1].Attributes[0].Value) - require.Equal(t, "mode", res.Events[1].Attributes[1].Key) - require.Equal(t, "EndBlock", res.Events[1].Attributes[1].Value) - - _, err = app.Commit() - require.NoError(t, err) - - require.Equal(t, int64(1), app.LastBlockHeight()) -} - -func TestABCI_ExtendVote(t *testing.T) { - name := t.Name() - db := dbm.NewMemDB() - app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) - - app.SetExtendVoteHandler(func(ctx sdk.Context, req *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) { - voteExt := "foo" + hex.EncodeToString(req.Hash) + strconv.FormatInt(req.Height, 10) - return &abci.ResponseExtendVote{VoteExtension: []byte(voteExt)}, nil - }) - - app.SetVerifyVoteExtensionHandler(func(ctx sdk.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) { - // do some kind of verification here - expectedVoteExt := "foo" + hex.EncodeToString(req.Hash) + strconv.FormatInt(req.Height, 10) - if !bytes.Equal(req.VoteExtension, []byte(expectedVoteExt)) { - return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_REJECT}, nil - } - - return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil - }) - - app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) - _, err := app.InitChain( - &abci.RequestInitChain{ - InitialHeight: 1, - ConsensusParams: &cmtproto.ConsensusParams{ - Abci: &cmtproto.ABCIParams{ - VoteExtensionsEnableHeight: 200, - }, - }, - }, - ) - require.NoError(t, err) - - // Votes not enabled yet - _, err = app.ExtendVote(context.Background(), &abci.RequestExtendVote{Height: 123, Hash: []byte("thehash")}) - require.ErrorContains(t, err, "vote extensions are not enabled") - - // First vote on the first enabled height - res, err := app.ExtendVote(context.Background(), &abci.RequestExtendVote{Height: 200, Hash: []byte("thehash")}) - require.NoError(t, err) - require.Len(t, res.VoteExtension, 20) - - res, err = app.ExtendVote(context.Background(), &abci.RequestExtendVote{Height: 1000, Hash: []byte("thehash")}) - require.NoError(t, err) - require.Len(t, res.VoteExtension, 21) - - // Error during vote extension should return an empty vote extension and no error - app.SetExtendVoteHandler(func(ctx sdk.Context, req *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) { - return nil, errors.New("some error") - }) - res, err = app.ExtendVote(context.Background(), &abci.RequestExtendVote{Height: 1000, Hash: []byte("thehash")}) - require.NoError(t, err) - require.Len(t, res.VoteExtension, 0) - - // Verify Vote Extensions - _, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 123, VoteExtension: []byte("1234567")}) - require.ErrorContains(t, err, "vote extensions are not enabled") - - // First vote on the first enabled height - vres, err := app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 200, Hash: []byte("thehash"), VoteExtension: []byte("foo74686568617368200")}) - 
require.NoError(t, err)
- require.Equal(t, abci.ResponseVerifyVoteExtension_ACCEPT, vres.Status)
-
- vres, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 1000, Hash: []byte("thehash"), VoteExtension: []byte("foo746865686173681000")})
- require.NoError(t, err)
- require.Equal(t, abci.ResponseVerifyVoteExtension_ACCEPT, vres.Status)
-
- // Reject because it's just some random bytes
- vres, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 201, Hash: []byte("thehash"), VoteExtension: []byte("12345678")})
- require.NoError(t, err)
- require.Equal(t, abci.ResponseVerifyVoteExtension_REJECT, vres.Status)
-
- // Reject because the verification failed (no error)
- app.SetVerifyVoteExtensionHandler(func(ctx sdk.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) {
- return nil, errors.New("some error")
- })
- vres, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 201, Hash: []byte("thehash"), VoteExtension: []byte("12345678")})
- require.NoError(t, err)
- require.Equal(t, abci.ResponseVerifyVoteExtension_REJECT, vres.Status)
-}
-
-// TestABCI_OnlyVerifyVoteExtension makes sure we can call VerifyVoteExtension
-// without having called ExtendVote before.
-func TestABCI_OnlyVerifyVoteExtension(t *testing.T) {
- name := t.Name()
- db := dbm.NewMemDB()
- app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil)
-
- app.SetVerifyVoteExtensionHandler(func(ctx sdk.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) {
- // do some kind of verification here
- expectedVoteExt := "foo" + hex.EncodeToString(req.Hash) + strconv.FormatInt(req.Height, 10)
- if !bytes.Equal(req.VoteExtension, []byte(expectedVoteExt)) {
- return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_REJECT}, nil
- }
-
- return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil
- })
-
- app.SetParamStore(&paramStore{db: dbm.NewMemDB()})
- _, err := app.InitChain(
- &abci.RequestInitChain{
- InitialHeight: 1,
- ConsensusParams: &cmtproto.ConsensusParams{
- Abci: &cmtproto.ABCIParams{
- VoteExtensionsEnableHeight: 200,
- },
- },
- },
- )
- require.NoError(t, err)
-
- // Verify Vote Extensions
- _, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 123, VoteExtension: []byte("1234567")})
- require.ErrorContains(t, err, "vote extensions are not enabled")
-
- // First vote on the first enabled height
- vres, err := app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 200, Hash: []byte("thehash"), VoteExtension: []byte("foo74686568617368200")})
- require.NoError(t, err)
- require.Equal(t, abci.ResponseVerifyVoteExtension_ACCEPT, vres.Status)
-
- vres, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 1000, Hash: []byte("thehash"), VoteExtension: []byte("foo746865686173681000")})
- require.NoError(t, err)
- require.Equal(t, abci.ResponseVerifyVoteExtension_ACCEPT, vres.Status)
-
- // Reject because it's just some random bytes
- vres, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 201, Hash: []byte("thehash"), VoteExtension: []byte("12345678")})
- require.NoError(t, err)
- require.Equal(t, abci.ResponseVerifyVoteExtension_REJECT, vres.Status)
-
- // Reject because the verification failed (no error)
- app.SetVerifyVoteExtensionHandler(func(ctx sdk.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) {
- return nil, errors.New("some 
error") - }) - vres, err = app.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{Height: 201, Hash: []byte("thehash"), VoteExtension: []byte("12345678")}) - require.NoError(t, err) - require.Equal(t, abci.ResponseVerifyVoteExtension_REJECT, vres.Status) -} - -func TestABCI_GRPCQuery(t *testing.T) { - grpcQueryOpt := func(bapp *baseapp.BaseApp) { - testdata.RegisterQueryServer( - bapp.GRPCQueryRouter(), - testdata.QueryImpl{}, - ) - } - - suite := NewBaseAppSuite(t, grpcQueryOpt) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - req := testdata.SayHelloRequest{Name: "foo"} - reqBz, err := req.Marshal() - require.NoError(t, err) - - resQuery, err := suite.baseApp.Query(context.TODO(), &abci.RequestQuery{ - Data: reqBz, - Path: "/testpb.Query/SayHello", - }) - require.NoError(t, err) - require.Equal(t, sdkerrors.ErrInvalidHeight.ABCICode(), resQuery.Code, resQuery) - require.Contains(t, resQuery.Log, "TestABCI_GRPCQuery is not ready; please wait for first block") - - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: suite.baseApp.LastBlockHeight() + 1}) - require.NoError(t, err) - _, err = suite.baseApp.Commit() - require.NoError(t, err) - - reqQuery := abci.RequestQuery{ - Data: reqBz, - Path: "/testpb.Query/SayHello", - } - - resQuery, err = suite.baseApp.Query(context.TODO(), &reqQuery) - require.NoError(t, err) - require.Equal(t, abci.CodeTypeOK, resQuery.Code, resQuery) - - var res testdata.SayHelloResponse - require.NoError(t, res.Unmarshal(resQuery.Value)) - require.Equal(t, "Hello foo!", res.Greeting) -} - -func TestABCI_P2PQuery(t *testing.T) { - addrPeerFilterOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAddrPeerFilter(func(addrport string) *abci.ResponseQuery { - require.Equal(t, "1.1.1.1:8000", addrport) - return &abci.ResponseQuery{Code: uint32(3)} - }) - } - - idPeerFilterOpt := func(bapp *baseapp.BaseApp) { - bapp.SetIDPeerFilter(func(id string) *abci.ResponseQuery { - require.Equal(t, "testid", id) - return &abci.ResponseQuery{Code: uint32(4)} - }) - } - - suite := NewBaseAppSuite(t, addrPeerFilterOpt, idPeerFilterOpt) - - addrQuery := abci.RequestQuery{ - Path: "/p2p/filter/addr/1.1.1.1:8000", - } - res, err := suite.baseApp.Query(context.TODO(), &addrQuery) - require.NoError(t, err) - require.Equal(t, uint32(3), res.Code) - - idQuery := abci.RequestQuery{ - Path: "/p2p/filter/id/testid", - } - res, err = suite.baseApp.Query(context.TODO(), &idQuery) - require.NoError(t, err) - require.Equal(t, uint32(4), res.Code) -} - -func TestBaseApp_PrepareCheckState(t *testing.T) { - db := dbm.NewMemDB() - name := t.Name() - logger := log.NewTestLogger(t) - - cp := &cmtproto.ConsensusParams{ - Block: &cmtproto.BlockParams{ - MaxGas: 5000000, - }, - } - - app := baseapp.NewBaseApp(name, logger, db, nil) - app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) - _, err := app.InitChain(&abci.RequestInitChain{ - ConsensusParams: cp, - }) - require.NoError(t, err) - - wasPrepareCheckStateCalled := false - app.SetPrepareCheckStater(func(ctx sdk.Context) { - wasPrepareCheckStateCalled = true - }) - app.Seal() - - _, err = app.Commit() - require.NoError(t, err) - require.Equal(t, true, wasPrepareCheckStateCalled) -} - -func TestBaseApp_Precommit(t *testing.T) { - db := dbm.NewMemDB() - name := t.Name() - logger := log.NewTestLogger(t) - - cp := &cmtproto.ConsensusParams{ - Block: &cmtproto.BlockParams{ - MaxGas: 5000000, - }, - } - - app := baseapp.NewBaseApp(name, logger, db, nil) 
- app.SetParamStore(&paramStore{db: dbm.NewMemDB()})
- _, err := app.InitChain(&abci.RequestInitChain{
- ConsensusParams: cp,
- })
- require.NoError(t, err)
-
- wasPrecommiterCalled := false
- app.SetPrecommiter(func(ctx sdk.Context) {
- wasPrecommiterCalled = true
- })
- app.Seal()
-
- _, err = app.Commit()
- require.NoError(t, err)
- require.Equal(t, true, wasPrecommiterCalled)
-}
-
-func TestABCI_CheckTx(t *testing.T) {
- // This ante handler reads the key and checks that the value matches the
- // current counter. This ensures changes to the KVStore persist across
- // successive CheckTx runs.
- counterKey := []byte("counter-key")
- anteOpt := func(bapp *baseapp.BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, counterKey)) }
- suite := NewBaseAppSuite(t, anteOpt)
-
- baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, counterKey})
-
- nTxs := int64(5)
- _, err := suite.baseApp.InitChain(&abci.RequestInitChain{
- ConsensusParams: &cmtproto.ConsensusParams{},
- })
- require.NoError(t, err)
-
- for i := int64(0); i < nTxs; i++ {
- tx := newTxCounter(t, suite.txConfig, i, 0) // no messages
- txBytes, err := suite.txConfig.TxEncoder()(tx)
- require.NoError(t, err)
-
- r, err := suite.baseApp.CheckTx(&abci.RequestCheckTx{Tx: txBytes})
- require.NoError(t, err)
- require.True(t, r.IsOK(), fmt.Sprintf("%v", r))
- require.Empty(t, r.GetEvents())
- }
-
- checkStateStore := getCheckStateCtx(suite.baseApp).KVStore(capKey1)
- storedCounter := getIntFromStore(t, checkStateStore, counterKey)
-
- // ensure AnteHandler ran
- require.Equal(t, nTxs, storedCounter)
-
- // if a block is committed, CheckTx state should be reset
- _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{
- Height: 1,
- Hash: []byte("hash"),
- })
- require.NoError(t, err)
-
- require.NotNil(t, getCheckStateCtx(suite.baseApp).BlockGasMeter(), "block gas meter should have been set to checkState")
- require.NotEmpty(t, getCheckStateCtx(suite.baseApp).HeaderHash())
-
- _, err = suite.baseApp.Commit()
- require.NoError(t, err)
-
- checkStateStore = getCheckStateCtx(suite.baseApp).KVStore(capKey1)
- storedBytes := checkStateStore.Get(counterKey)
- require.Nil(t, storedBytes)
-}
-
-func TestABCI_FinalizeBlock_DeliverTx(t *testing.T) {
- anteKey := []byte("ante-key")
- anteOpt := func(bapp *baseapp.BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) }
- suite := NewBaseAppSuite(t, anteOpt)
-
- _, err := suite.baseApp.InitChain(&abci.RequestInitChain{
- ConsensusParams: &cmtproto.ConsensusParams{},
- })
- require.NoError(t, err)
-
- deliverKey := []byte("deliver-key")
- baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey})
-
- nBlocks := 3
- txPerHeight := 5
-
- for blockN := 0; blockN < nBlocks; blockN++ {
-
- txs := [][]byte{}
- for i := 0; i < txPerHeight; i++ {
- counter := int64(blockN*txPerHeight + i)
- tx := newTxCounter(t, suite.txConfig, counter, counter)
-
- txBytes, err := suite.txConfig.TxEncoder()(tx)
- require.NoError(t, err)
-
- txs = append(txs, txBytes)
- }
-
- res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{
- Height: int64(blockN) + 1,
- Txs: txs,
- })
- require.NoError(t, err)
-
- for i := 0; i < txPerHeight; i++ {
- counter := int64(blockN*txPerHeight + i)
- require.True(t, res.TxResults[i].IsOK(), fmt.Sprintf("%v", res))
-
- events := res.TxResults[i].GetEvents()
- require.Len(t, events, 3, "should contain ante handler, message type and counter events 
respectively") - require.Equal(t, sdk.MarkEventsToIndex(counterEvent("ante_handler", counter).ToABCIEvents(), map[string]struct{}{})[0], events[0], "ante handler event") - require.Equal(t, sdk.MarkEventsToIndex(counterEvent(sdk.EventTypeMessage, counter).ToABCIEvents(), map[string]struct{}{})[0].Attributes[0], events[2].Attributes[0], "msg handler update counter event") - } - - _, err = suite.baseApp.Commit() - require.NoError(t, err) - } -} - -func TestABCI_FinalizeBlock_MultiMsg(t *testing.T) { - anteKey := []byte("ante-key") - anteOpt := func(bapp *baseapp.BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } - suite := NewBaseAppSuite(t, anteOpt) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - deliverKey := []byte("deliver-key") - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey}) - - deliverKey2 := []byte("deliver-key2") - baseapptestutil.RegisterCounter2Server(suite.baseApp.MsgServiceRouter(), Counter2ServerImpl{t, capKey1, deliverKey2}) - - // run a multi-msg tx - // with all msgs the same route - tx := newTxCounter(t, suite.txConfig, 0, 0, 1, 2) - txBytes, err := suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: 1, - Txs: [][]byte{txBytes}, - }) - require.NoError(t, err) - - store := getFinalizeBlockStateCtx(suite.baseApp).KVStore(capKey1) - - // tx counter only incremented once - txCounter := getIntFromStore(t, store, anteKey) - require.Equal(t, int64(1), txCounter) - - // msg counter incremented three times - msgCounter := getIntFromStore(t, store, deliverKey) - require.Equal(t, int64(3), msgCounter) - - // replace the second message with a Counter2 - tx = newTxCounter(t, suite.txConfig, 1, 3) - - builder := suite.txConfig.NewTxBuilder() - msgs := tx.GetMsgs() - _, _, addr := testdata.KeyTestPubAddr() - msgs = append(msgs, &baseapptestutil.MsgCounter2{Counter: 0, Signer: addr.String()}) - msgs = append(msgs, &baseapptestutil.MsgCounter2{Counter: 1, Signer: addr.String()}) - - builder.SetMsgs(msgs...) 
- builder.SetMemo(tx.GetMemo()) - setTxSignature(t, builder, 0) - - txBytes, err = suite.txConfig.TxEncoder()(builder.GetTx()) - require.NoError(t, err) - - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: 1, - Txs: [][]byte{txBytes}, - }) - require.NoError(t, err) - - store = getFinalizeBlockStateCtx(suite.baseApp).KVStore(capKey1) - - // tx counter only incremented once - txCounter = getIntFromStore(t, store, anteKey) - require.Equal(t, int64(2), txCounter) - - // original counter increments by one - // new counter increments by two - msgCounter = getIntFromStore(t, store, deliverKey) - require.Equal(t, int64(4), msgCounter) - - msgCounter2 := getIntFromStore(t, store, deliverKey2) - require.Equal(t, int64(2), msgCounter2) -} - -func TestABCI_Query_SimulateTx(t *testing.T) { - gasConsumed := uint64(5) - anteOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(storetypes.NewGasMeter(gasConsumed)) - return - }) - } - suite := NewBaseAppSuite(t, anteOpt) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{gasConsumed}) - - nBlocks := 3 - for blockN := 0; blockN < nBlocks; blockN++ { - count := int64(blockN + 1) - - tx := newTxCounter(t, suite.txConfig, count, count) - - txBytes, err := suite.txConfig.TxEncoder()(tx) - require.Nil(t, err) - - // simulate a message, check gas reported - gInfo, result, err := suite.baseApp.Simulate(txBytes) - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, gasConsumed, gInfo.GasUsed) - - // simulate again, same result - gInfo, result, err = suite.baseApp.Simulate(txBytes) - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, gasConsumed, gInfo.GasUsed) - - // simulate by calling Query with encoded tx - query := abci.RequestQuery{ - Path: "/app/simulate", - Data: txBytes, - } - queryResult, err := suite.baseApp.Query(context.TODO(), &query) - require.NoError(t, err) - require.True(t, queryResult.IsOK(), queryResult.Log) - - var simRes sdk.SimulationResponse - require.NoError(t, jsonpb.Unmarshal(strings.NewReader(string(queryResult.Value)), &simRes)) - - require.Equal(t, gInfo, simRes.GasInfo) - require.Equal(t, result.Log, simRes.Result.Log) - require.Equal(t, result.Events, simRes.Result.Events) - require.True(t, bytes.Equal(result.Data, simRes.Result.Data)) - - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: count}) - require.NoError(t, err) - _, err = suite.baseApp.Commit() - require.NoError(t, err) - } -} - -func TestABCI_InvalidTransaction(t *testing.T) { - anteOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - return - }) - } - - suite := NewBaseAppSuite(t, anteOpt) - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: 1, - }) - require.NoError(t, err) - - // malformed transaction bytes - { - bz := []byte("example vote extension") - result, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ 
- Height: 1, - Txs: [][]byte{bz}, - }) - - require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), result.TxResults[0].Codespace, err) - require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), result.TxResults[0].Code, err) - require.EqualValues(t, 0, result.TxResults[0].GasUsed, err) - require.EqualValues(t, 0, result.TxResults[0].GasWanted, err) - } - // transaction with no messages - { - emptyTx := suite.txConfig.NewTxBuilder().GetTx() - bz, err := suite.txConfig.TxEncoder()(emptyTx) - require.NoError(t, err) - result, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: 1, - Txs: [][]byte{bz}, - }) - require.EqualValues(t, sdkerrors.ErrInvalidRequest.Codespace(), result.TxResults[0].Codespace, err) - require.EqualValues(t, sdkerrors.ErrInvalidRequest.ABCICode(), result.TxResults[0].Code, err) - } - - // transaction where ValidateBasic fails - { - testCases := []struct { - tx signing.Tx - fail bool - }{ - {newTxCounter(t, suite.txConfig, 0, 0), false}, - {newTxCounter(t, suite.txConfig, -1, 0), false}, - {newTxCounter(t, suite.txConfig, 100, 100), false}, - {newTxCounter(t, suite.txConfig, 100, 5, 4, 3, 2, 1), false}, - - {newTxCounter(t, suite.txConfig, 0, -1), true}, - {newTxCounter(t, suite.txConfig, 0, 1, -2), true}, - {newTxCounter(t, suite.txConfig, 0, 1, 2, -10, 5), true}, - } - - for _, testCase := range testCases { - tx := testCase.tx - _, result, err := suite.baseApp.SimDeliver(suite.txConfig.TxEncoder(), tx) - - if testCase.fail { - require.Error(t, err) - - space, code, _ := errorsmod.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrInvalidSequence.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrInvalidSequence.ABCICode(), code, err) - } else { - require.NotNil(t, result) - } - } - } - - // transaction with no known route - { - txBuilder := suite.txConfig.NewTxBuilder() - _, _, addr := testdata.KeyTestPubAddr() - txBuilder.SetMsgs(&baseapptestutil.MsgCounter2{Signer: addr.String()}) - setTxSignature(t, txBuilder, 0) - unknownRouteTx := txBuilder.GetTx() - - _, result, err := suite.baseApp.SimDeliver(suite.txConfig.TxEncoder(), unknownRouteTx) - require.Error(t, err) - require.Nil(t, result) - - space, code, _ := errorsmod.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) - - txBuilder = suite.txConfig.NewTxBuilder() - txBuilder.SetMsgs( - &baseapptestutil.MsgCounter{Signer: addr.String()}, - &baseapptestutil.MsgCounter2{Signer: addr.String()}, - ) - setTxSignature(t, txBuilder, 0) - unknownRouteTx = txBuilder.GetTx() - - _, result, err = suite.baseApp.SimDeliver(suite.txConfig.TxEncoder(), unknownRouteTx) - require.Error(t, err) - require.Nil(t, result) - - space, code, _ = errorsmod.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) - } - - // Transaction with an unregistered message - { - txBuilder := suite.txConfig.NewTxBuilder() - txBuilder.SetMsgs(&testdata.MsgCreateDog{}) - tx := txBuilder.GetTx() - - _, _, err := suite.baseApp.SimDeliver(suite.txConfig.TxEncoder(), tx) - require.Error(t, err) - space, code, _ := errorsmod.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), code) - require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), space) - } -} - -func TestABCI_TxGasLimits(t *testing.T) { - gasGranted := uint64(10) - anteOpt := func(bapp 
*baseapp.BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(storetypes.NewGasMeter(gasGranted)) - - // AnteHandlers must have their own defer/recover in order for the BaseApp - // to know how much gas was used! This is because the GasMeter is created in - // the AnteHandler, but if it panics the context won't be set properly in - // runTx's recover call. - defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case storetypes.ErrorOutOfGas: - err = errorsmod.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) - default: - panic(r) - } - } - }() - - count, _ := parseTxMemo(t, tx) - newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") - - return newCtx, nil - }) - } - - suite := NewBaseAppSuite(t, anteOpt) - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: 1, - }) - require.NoError(t, err) - - _, err = suite.baseApp.Commit() - require.NoError(t, err) - - testCases := []struct { - tx signing.Tx - gasUsed int64 - fail bool - }{ - {newTxCounter(t, suite.txConfig, 0, 0), 0, false}, - {newTxCounter(t, suite.txConfig, 1, 1), 2, false}, - {newTxCounter(t, suite.txConfig, 9, 1), 10, false}, - {newTxCounter(t, suite.txConfig, 1, 9), 10, false}, - {newTxCounter(t, suite.txConfig, 10, 0), 10, false}, - - {newTxCounter(t, suite.txConfig, 9, 2), 11, true}, - {newTxCounter(t, suite.txConfig, 2, 9), 11, true}, - // {newTxCounter(t, suite.txConfig, 9, 1, 1), 11, true}, - // {newTxCounter(t, suite.txConfig, 1, 8, 1, 1), 11, true}, - // {newTxCounter(t, suite.txConfig, 11, 0), 11, true}, - // {newTxCounter(t, suite.txConfig, 0, 11), 11, true}, - // {newTxCounter(t, suite.txConfig, 0, 5, 11), 16, true}, - } - - txs := [][]byte{} - for _, tc := range testCases { - tx := tc.tx - bz, err := suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - txs = append(txs, bz) - } - - // Deliver the txs - res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: 2, - Txs: txs, - }) - - require.NoError(t, err) - - for i, tc := range testCases { - - result := res.TxResults[i] - - require.Equal(t, tc.gasUsed, result.GasUsed, fmt.Sprintf("tc #%d; gas: %v, result: %v, err: %s", i, result.GasUsed, result, err)) - - // check for out of gas - if !tc.fail { - require.NotNil(t, result, fmt.Sprintf("%d: %v, %v", i, tc, err)) - } else { - require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), result.Codespace, err) - require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), result.Code, err) - } - } -} - -func TestABCI_MaxBlockGasLimits(t *testing.T) { - gasGranted := uint64(10) - anteOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(storetypes.NewGasMeter(gasGranted)) - - defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case storetypes.ErrorOutOfGas: - err = errorsmod.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) - default: - panic(r) - } - } - }() - - count, _ := parseTxMemo(t, tx) - newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") - - return - }) - } - - suite := NewBaseAppSuite(t, anteOpt) - 
baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{ - Block: &cmtproto.BlockParams{ - MaxGas: 100, - }, - }, - }) - require.NoError(t, err) - - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) - require.NoError(t, err) - - testCases := []struct { - tx signing.Tx - numDelivers int - gasUsedPerDeliver uint64 - fail bool - failAfterDeliver int - }{ - {newTxCounter(t, suite.txConfig, 0, 0), 0, 0, false, 0}, - {newTxCounter(t, suite.txConfig, 9, 1), 2, 10, false, 0}, - {newTxCounter(t, suite.txConfig, 10, 0), 3, 10, false, 0}, - {newTxCounter(t, suite.txConfig, 10, 0), 10, 10, false, 0}, - {newTxCounter(t, suite.txConfig, 2, 7), 11, 9, false, 0}, - // {newTxCounter(t, suite.txConfig, 10, 0), 10, 10, false, 0}, // hit the limit but pass - - // {newTxCounter(t, suite.txConfig, 10, 0), 11, 10, true, 10}, - // {newTxCounter(t, suite.txConfig, 10, 0), 15, 10, true, 10}, - // {newTxCounter(t, suite.txConfig, 9, 0), 12, 9, true, 11}, // fly past the limit - } - - for i, tc := range testCases { - tx := tc.tx - - // reset block gas - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: suite.baseApp.LastBlockHeight() + 1}) - require.NoError(t, err) - - // execute the transaction multiple times - for j := 0; j < tc.numDelivers; j++ { - - _, result, err := suite.baseApp.SimDeliver(suite.txConfig.TxEncoder(), tx) - - ctx := getFinalizeBlockStateCtx(suite.baseApp) - - // check for failed transactions - if tc.fail && (j+1) > tc.failAfterDeliver { - require.Error(t, err, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) - require.Nil(t, tx, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) - - space, code, _ := errorsmod.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) - require.True(t, ctx.BlockGasMeter().IsOutOfGas()) - } else { - // check gas used and wanted - blockGasUsed := ctx.BlockGasMeter().GasConsumed() - expBlockGasUsed := tc.gasUsedPerDeliver * uint64(j+1) - require.Equal( - t, expBlockGasUsed, blockGasUsed, - fmt.Sprintf("%d,%d: %v, %v, %v, %v", i, j, tc, expBlockGasUsed, blockGasUsed, result), - ) - - require.NotNil(t, tx, fmt.Sprintf("tc #%d; currDeliver: %d, result: %v, err: %s", i, j, result, err)) - require.False(t, ctx.BlockGasMeter().IsPastLimit()) - } - } - } -} - -func TestABCI_GasConsumptionBadTx(t *testing.T) { - gasWanted := uint64(5) - anteOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(storetypes.NewGasMeter(gasWanted)) - - defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case storetypes.ErrorOutOfGas: - log := fmt.Sprintf("out of gas in location: %v", rType.Descriptor) - err = errorsmod.Wrap(sdkerrors.ErrOutOfGas, log) - default: - panic(r) - } - } - }() - - counter, failOnAnte := parseTxMemo(t, tx) - newCtx.GasMeter().ConsumeGas(uint64(counter), "counter-ante") - if failOnAnte { - return newCtx, errorsmod.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") - } - - return - }) - } - - suite := NewBaseAppSuite(t, anteOpt) - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - 
ConsensusParams: &cmtproto.ConsensusParams{ - Block: &cmtproto.BlockParams{ - MaxGas: 9, - }, - }, - }) - require.NoError(t, err) - - tx := newTxCounter(t, suite.txConfig, 5, 0) - tx = setFailOnAnte(t, suite.txConfig, tx, true) - txBytes, err := suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - - // require next tx to fail due to black gas limit - tx = newTxCounter(t, suite.txConfig, 5, 0) - txBytes2, err := suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: suite.baseApp.LastBlockHeight() + 1, - Txs: [][]byte{txBytes, txBytes2}, - }) - require.NoError(t, err) -} - -func TestABCI_Query(t *testing.T) { - key, value := []byte("hello"), []byte("goodbye") - anteOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - store := ctx.KVStore(capKey1) - store.Set(key, value) - return - }) - } - - suite := NewBaseAppSuite(t, anteOpt) - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - // NOTE: "/store/key1" tells us KVStore - // and the final "/key" says to use the data as the - // key in the given KVStore ... - query := abci.RequestQuery{ - Path: "/store/key1/key", - Data: key, - } - tx := newTxCounter(t, suite.txConfig, 0, 0) - - // query is empty before we do anything - res, err := suite.baseApp.Query(context.TODO(), &query) - require.NoError(t, err) - require.Equal(t, 0, len(res.Value)) - - // query is still empty after a CheckTx - _, resTx, err := suite.baseApp.SimCheck(suite.txConfig.TxEncoder(), tx) - require.NoError(t, err) - require.NotNil(t, resTx) - - res, err = suite.baseApp.Query(context.TODO(), &query) - require.NoError(t, err) - require.Equal(t, 0, len(res.Value)) - - bz, err := suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: 1, - Txs: [][]byte{bz}, - }) - require.NoError(t, err) - - res, err = suite.baseApp.Query(context.TODO(), &query) - require.NoError(t, err) - require.Equal(t, 0, len(res.Value)) - - // query returns correct value after Commit - _, err = suite.baseApp.Commit() - require.NoError(t, err) - - res, err = suite.baseApp.Query(context.TODO(), &query) - require.NoError(t, err) - require.Equal(t, value, res.Value) -} - -func TestABCI_GetBlockRetentionHeight(t *testing.T) { - logger := log.NewTestLogger(t) - db := dbm.NewMemDB() - name := t.Name() - - snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), testutil.GetTempDir(t)) - require.NoError(t, err) - - testCases := map[string]struct { - bapp *baseapp.BaseApp - maxAgeBlocks int64 - commitHeight int64 - expected int64 - }{ - "defaults": { - bapp: baseapp.NewBaseApp(name, logger, db, nil), - maxAgeBlocks: 0, - commitHeight: 499000, - expected: 0, - }, - "pruning unbonding time only": { - bapp: baseapp.NewBaseApp(name, logger, db, nil, baseapp.SetMinRetainBlocks(1)), - maxAgeBlocks: 362880, - commitHeight: 499000, - expected: 136120, - }, - "pruning iavl snapshot only": { - bapp: baseapp.NewBaseApp( - name, logger, db, nil, - baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)), - baseapp.SetMinRetainBlocks(1), - baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(10000, 1)), - ), - maxAgeBlocks: 0, - commitHeight: 
499000,
- expected: 489000,
- },
- "pruning state sync snapshot only": {
- bapp: baseapp.NewBaseApp(
- name, logger, db, nil,
- baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)),
- baseapp.SetMinRetainBlocks(1),
- ),
- maxAgeBlocks: 0,
- commitHeight: 499000,
- expected: 349000,
- },
- "pruning min retention only": {
- bapp: baseapp.NewBaseApp(
- name, logger, db, nil,
- baseapp.SetMinRetainBlocks(400000),
- ),
- maxAgeBlocks: 0,
- commitHeight: 499000,
- expected: 99000,
- },
- "pruning all conditions": {
- bapp: baseapp.NewBaseApp(
- name, logger, db, nil,
- baseapp.SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)),
- baseapp.SetMinRetainBlocks(400000),
- baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)),
- ),
- maxAgeBlocks: 362880,
- commitHeight: 499000,
- expected: 99000,
- },
- "no pruning due to no persisted state": {
- bapp: baseapp.NewBaseApp(
- name, logger, db, nil,
- baseapp.SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)),
- baseapp.SetMinRetainBlocks(400000),
- baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)),
- ),
- maxAgeBlocks: 362880,
- commitHeight: 10000,
- expected: 0,
- },
- "disable pruning": {
- bapp: baseapp.NewBaseApp(
- name, logger, db, nil,
- baseapp.SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)),
- baseapp.SetMinRetainBlocks(0),
- baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)),
- ),
- maxAgeBlocks: 362880,
- commitHeight: 499000,
- expected: 0,
- },
- }
-
- for name, tc := range testCases {
- tc := tc
-
- tc.bapp.SetParamStore(&paramStore{db: dbm.NewMemDB()})
- _, err = tc.bapp.InitChain(&abci.RequestInitChain{
- ConsensusParams: &cmtproto.ConsensusParams{
- Evidence: &cmtproto.EvidenceParams{
- MaxAgeNumBlocks: tc.maxAgeBlocks,
- },
- },
- })
- require.NoError(t, err)
-
- t.Run(name, func(t *testing.T) {
- require.Equal(t, tc.expected, tc.bapp.GetBlockRetentionHeight(tc.commitHeight))
- })
- }
-}
-
-// Verifies that PrepareCheckState is called with the checkState.
-func TestPrepareCheckStateCalledWithCheckState(t *testing.T) {
- t.Parallel()
-
- logger := log.NewTestLogger(t)
- db := dbm.NewMemDB()
- name := t.Name()
- app := baseapp.NewBaseApp(name, logger, db, nil)
-
- wasPrepareCheckStateCalled := false
- app.SetPrepareCheckStater(func(ctx sdk.Context) {
- require.Equal(t, true, ctx.IsCheckTx())
- wasPrepareCheckStateCalled = true
- })
-
- _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1})
- require.NoError(t, err)
- _, err = app.Commit()
- require.NoError(t, err)
-
- require.Equal(t, true, wasPrepareCheckStateCalled)
-}
-
-// Verifies that the Precommiter is called with the deliverState. 
-func TestPrecommiterCalledWithDeliverState(t *testing.T) { - t.Parallel() - - logger := log.NewTestLogger(t) - db := dbm.NewMemDB() - name := t.Name() - app := baseapp.NewBaseApp(name, logger, db, nil) - - wasPrecommiterCalled := false - app.SetPrecommiter(func(ctx sdk.Context) { - require.Equal(t, false, ctx.IsCheckTx()) - require.Equal(t, false, ctx.IsReCheckTx()) - wasPrecommiterCalled = true - }) - - _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) - require.NoError(t, err) - _, err = app.Commit() - require.NoError(t, err) - - require.Equal(t, true, wasPrecommiterCalled) -} - -func TestABCI_Proposal_HappyPath(t *testing.T) { - anteKey := []byte("ante-key") - pool := mempool.NewSenderNonceMempool() - anteOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) - } - - suite := NewBaseAppSuite(t, anteOpt, baseapp.SetMempool(pool)) - baseapptestutil.RegisterKeyValueServer(suite.baseApp.MsgServiceRouter(), MsgKeyValueImpl{}) - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - tx := newTxCounter(t, suite.txConfig, 0, 1) - txBytes, err := suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - - reqCheckTx := abci.RequestCheckTx{ - Tx: txBytes, - Type: abci.CheckTxType_New, - } - _, err = suite.baseApp.CheckTx(&reqCheckTx) - require.NoError(t, err) - - tx2 := newTxCounter(t, suite.txConfig, 1, 1) - - tx2Bytes, err := suite.txConfig.TxEncoder()(tx2) - require.NoError(t, err) - - err = pool.Insert(sdk.Context{}, tx2) - require.NoError(t, err) - - reqPrepareProposal := abci.RequestPrepareProposal{ - MaxTxBytes: 1000, - Height: 1, - } - resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) - require.NoError(t, err) - require.Equal(t, 2, len(resPrepareProposal.Txs)) - - reqProposalTxBytes := [2][]byte{ - txBytes, - tx2Bytes, - } - reqProcessProposal := abci.RequestProcessProposal{ - Txs: reqProposalTxBytes[:], - Height: reqPrepareProposal.Height, - } - - resProcessProposal, err := suite.baseApp.ProcessProposal(&reqProcessProposal) - require.NoError(t, err) - require.Equal(t, abci.ResponseProcessProposal_ACCEPT, resProcessProposal.Status) - - // the same txs as in PrepareProposal - res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: suite.baseApp.LastBlockHeight() + 1, - Txs: reqProposalTxBytes[:], - }) - require.NoError(t, err) - - require.Equal(t, 0, pool.CountTx()) - - require.NotEmpty(t, res.TxResults[0].Events) - require.True(t, res.TxResults[0].IsOK(), fmt.Sprintf("%v", res)) -} - -func TestABCI_Proposal_Read_State_PrepareProposal(t *testing.T) { - someKey := []byte("some-key") - - setInitChainerOpt := func(bapp *baseapp.BaseApp) { - bapp.SetInitChainer(func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { - ctx.KVStore(capKey1).Set(someKey, []byte("foo")) - return &abci.ResponseInitChain{}, nil - }) - } - - prepareOpt := func(bapp *baseapp.BaseApp) { - bapp.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { - value := ctx.KVStore(capKey1).Get(someKey) - // We should be able to access any state written in InitChain - require.Equal(t, "foo", string(value)) - return &abci.ResponsePrepareProposal{Txs: req.Txs}, nil - }) - } - - suite := NewBaseAppSuite(t, setInitChainerOpt, prepareOpt) - - _, err := 
suite.baseApp.InitChain(&abci.RequestInitChain{ - InitialHeight: 1, - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - reqPrepareProposal := abci.RequestPrepareProposal{ - MaxTxBytes: 1000, - Height: 1, // this value can't be 0 - } - resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) - require.NoError(t, err) - require.Equal(t, 0, len(resPrepareProposal.Txs)) - - reqProposalTxBytes := [][]byte{} - reqProcessProposal := abci.RequestProcessProposal{ - Txs: reqProposalTxBytes, - Height: reqPrepareProposal.Height, - } - - resProcessProposal, err := suite.baseApp.ProcessProposal(&reqProcessProposal) - require.NoError(t, err) - require.Equal(t, abci.ResponseProcessProposal_ACCEPT, resProcessProposal.Status) -} - -func TestABCI_Proposals_WithVE(t *testing.T) { - someVoteExtension := []byte("some-vote-extension") - - setInitChainerOpt := func(bapp *baseapp.BaseApp) { - bapp.SetInitChainer(func(ctx sdk.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { - return &abci.ResponseInitChain{}, nil - }) - } - - prepareOpt := func(bapp *baseapp.BaseApp) { - bapp.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { - // Inject the vote extension to the beginning of the proposal - txs := make([][]byte, len(req.Txs)+1) - txs[0] = someVoteExtension - copy(txs[1:], req.Txs) - - return &abci.ResponsePrepareProposal{Txs: txs}, nil - }) - - bapp.SetProcessProposal(func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { - // Check that the vote extension is still there - require.Equal(t, someVoteExtension, req.Txs[0]) - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil - }) - } - - suite := NewBaseAppSuite(t, setInitChainerOpt, prepareOpt) - - suite.baseApp.InitChain(&abci.RequestInitChain{ - InitialHeight: 1, - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - - reqPrepareProposal := abci.RequestPrepareProposal{ - MaxTxBytes: 100000, - Height: 1, // this value can't be 0 - } - resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) - require.NoError(t, err) - require.Equal(t, 1, len(resPrepareProposal.Txs)) - - reqProcessProposal := abci.RequestProcessProposal{ - Txs: resPrepareProposal.Txs, - Height: reqPrepareProposal.Height, - } - resProcessProposal, err := suite.baseApp.ProcessProposal(&reqProcessProposal) - require.NoError(t, err) - require.Equal(t, abci.ResponseProcessProposal_ACCEPT, resProcessProposal.Status) - - // Run finalize block and ensure that the vote extension is still there and that - // the proposal is accepted - result, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ - Txs: resPrepareProposal.Txs, - Height: reqPrepareProposal.Height, - }) - require.NoError(t, err) - require.Equal(t, 1, len(result.TxResults)) - require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), result.TxResults[0].Codespace, err) - require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), result.TxResults[0].Code, err) - require.EqualValues(t, 0, result.TxResults[0].GasUsed, err) - require.EqualValues(t, 0, result.TxResults[0].GasWanted, err) -} - -func TestABCI_PrepareProposal_ReachedMaxBytes(t *testing.T) { - anteKey := []byte("ante-key") - pool := mempool.NewSenderNonceMempool() - anteOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) - } - - suite := NewBaseAppSuite(t, anteOpt, baseapp.SetMempool(pool)) - 
baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - for i := 0; i < 100; i++ { - tx2 := newTxCounter(t, suite.txConfig, int64(i), int64(i)) - err := pool.Insert(sdk.Context{}, tx2) - require.NoError(t, err) - } - - reqPrepareProposal := abci.RequestPrepareProposal{ - MaxTxBytes: 1500, - Height: 1, - } - resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) - require.NoError(t, err) - require.Equal(t, 8, len(resPrepareProposal.Txs)) -} - -func TestABCI_PrepareProposal_BadEncoding(t *testing.T) { - anteKey := []byte("ante-key") - pool := mempool.NewSenderNonceMempool() - anteOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) - } - - suite := NewBaseAppSuite(t, anteOpt, baseapp.SetMempool(pool)) - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - tx := newTxCounter(t, suite.txConfig, 0, 0) - err = pool.Insert(sdk.Context{}, tx) - require.NoError(t, err) - - reqPrepareProposal := abci.RequestPrepareProposal{ - MaxTxBytes: 1000, - Height: 1, - } - resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) - require.NoError(t, err) - require.Equal(t, 1, len(resPrepareProposal.Txs)) -} - -func TestABCI_PrepareProposal_OverGasUnderBytes(t *testing.T) { - pool := mempool.NewSenderNonceMempool() - suite := NewBaseAppSuite(t, baseapp.SetMempool(pool)) - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) - - // set max block gas limit to 99, this will allow 9 txs of 10 gas each. - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{ - Block: &cmtproto.BlockParams{MaxGas: 99}, - }, - }) - - require.NoError(t, err) - // insert 100 txs, each with a gas limit of 10 - _, _, addr := testdata.KeyTestPubAddr() - for i := int64(0); i < 100; i++ { - msg := &baseapptestutil.MsgCounter{Counter: i, FailOnHandler: false, Signer: addr.String()} - msgs := []sdk.Msg{msg} - - builder := suite.txConfig.NewTxBuilder() - err = builder.SetMsgs(msgs...) 
- require.NoError(t, err) - builder.SetMemo("counter=" + strconv.FormatInt(i, 10) + "&failOnAnte=false") - builder.SetGasLimit(10) - setTxSignature(t, builder, uint64(i)) - - err := pool.Insert(sdk.Context{}, builder.GetTx()) - require.NoError(t, err) - } - - // ensure we only select transactions that fit within the block gas limit - res, err := suite.baseApp.PrepareProposal(&abci.RequestPrepareProposal{ - MaxTxBytes: 1_000_000, // large enough to ignore restriction - Height: 1, - }) - require.NoError(t, err) - - // Should include 9 transactions - require.Len(t, res.Txs, 9, "invalid number of transactions returned") -} - -func TestABCI_PrepareProposal_MaxGas(t *testing.T) { - pool := mempool.NewSenderNonceMempool() - suite := NewBaseAppSuite(t, baseapp.SetMempool(pool)) - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) - - // set max block gas limit to 100 - suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{ - Block: &cmtproto.BlockParams{MaxGas: 100}, - }, - }) - - // insert 100 txs, each with a gas limit of 10 - _, _, addr := testdata.KeyTestPubAddr() - for i := int64(0); i < 100; i++ { - msg := &baseapptestutil.MsgCounter{Counter: i, FailOnHandler: false, Signer: addr.String()} - msgs := []sdk.Msg{msg} - - builder := suite.txConfig.NewTxBuilder() - builder.SetMsgs(msgs...) - builder.SetMemo("counter=" + strconv.FormatInt(i, 10) + "&failOnAnte=false") - builder.SetGasLimit(10) - setTxSignature(t, builder, uint64(i)) - - err := pool.Insert(sdk.Context{}, builder.GetTx()) - require.NoError(t, err) - } - - // ensure we only select transactions that fit within the block gas limit - res, err := suite.baseApp.PrepareProposal(&abci.RequestPrepareProposal{ - MaxTxBytes: 1_000_000, // large enough to ignore restriction - Height: 1, - }) - require.NoError(t, err) - require.Len(t, res.Txs, 10, "invalid number of transactions returned") -} - -func TestABCI_PrepareProposal_Failures(t *testing.T) { - anteKey := []byte("ante-key") - pool := mempool.NewSenderNonceMempool() - anteOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) - } - - suite := NewBaseAppSuite(t, anteOpt, baseapp.SetMempool(pool)) - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - tx := newTxCounter(t, suite.txConfig, 0, 0) - txBytes, err := suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - - reqCheckTx := abci.RequestCheckTx{ - Tx: txBytes, - Type: abci.CheckTxType_New, - } - checkTxRes, err := suite.baseApp.CheckTx(&reqCheckTx) - require.NoError(t, err) - require.True(t, checkTxRes.IsOK()) - - failTx := newTxCounter(t, suite.txConfig, 1, 1) - failTx = setFailOnAnte(t, suite.txConfig, failTx, true) - - err = pool.Insert(sdk.Context{}, failTx) - require.NoError(t, err) - require.Equal(t, 2, pool.CountTx()) - - req := abci.RequestPrepareProposal{ - MaxTxBytes: 1000, - Height: 1, - } - res, err := suite.baseApp.PrepareProposal(&req) - require.NoError(t, err) - require.Equal(t, 1, len(res.Txs)) -} - -func TestABCI_PrepareProposal_PanicRecovery(t *testing.T) { - prepareOpt := func(app *baseapp.BaseApp) { - app.SetPrepareProposal(func(ctx sdk.Context, rpp *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { - panic(errors.New("test")) - }) - } - suite := NewBaseAppSuite(t, prepareOpt) - 
- _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - req := abci.RequestPrepareProposal{ - MaxTxBytes: 1000, - Height: 1, - } - - require.NotPanics(t, func() { - res, err := suite.baseApp.PrepareProposal(&req) - require.NoError(t, err) - require.Equal(t, req.Txs, res.Txs) - }) -} - -func TestABCI_PrepareProposal_VoteExtensions(t *testing.T) { - // set up mocks - ctrl := gomock.NewController(t) - valStore := mock.NewMockValidatorStore(ctrl) - privkey := secp256k1.GenPrivKey() - pubkey := privkey.PubKey() - addr := sdk.AccAddress(pubkey.Address()) - tmPk := cmtprotocrypto.PublicKey{ - Sum: &cmtprotocrypto.PublicKey_Secp256K1{ - Secp256K1: pubkey.Bytes(), - }, - } - - consAddr := sdk.ConsAddress(addr.String()) - valStore.EXPECT().GetPubKeyByConsAddr(gomock.Any(), consAddr.Bytes()).Return(tmPk, nil) - - // set up baseapp - prepareOpt := func(bapp *baseapp.BaseApp) { - bapp.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { - err := baseapp.ValidateVoteExtensions(ctx, valStore, req.Height, bapp.ChainID(), req.LocalLastCommit) - if err != nil { - return nil, err - } - - cp := ctx.ConsensusParams() - extsEnabled := cp.Abci != nil && req.Height >= cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0 - if extsEnabled { - req.Txs = append(req.Txs, []byte("some-tx-that-does-something-from-votes")) - } - return &abci.ResponsePrepareProposal{Txs: req.Txs}, nil - }) - } - - suite := NewBaseAppSuite(t, prepareOpt) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - InitialHeight: 1, - ConsensusParams: &cmtproto.ConsensusParams{ - Abci: &cmtproto.ABCIParams{ - VoteExtensionsEnableHeight: 2, - }, - }, - }) - require.NoError(t, err) - - // first test without vote extensions, no new txs should be added - reqPrepareProposal := abci.RequestPrepareProposal{ - MaxTxBytes: 1000, - Height: 1, // this value can't be 0 - } - resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) - require.NoError(t, err) - require.Equal(t, 0, len(resPrepareProposal.Txs)) - - // now we try with vote extensions, a new tx should show up - marshalDelimitedFn := func(msg proto.Message) ([]byte, error) { - var buf bytes.Buffer - if err := protoio.NewDelimitedWriter(&buf).WriteMsg(msg); err != nil { - return nil, err - } - - return buf.Bytes(), nil - } - - ext := []byte("something") - cve := cmtproto.CanonicalVoteExtension{ - Extension: ext, - Height: 2, // the vote extension was signed in the previous height - Round: int64(0), - ChainId: suite.baseApp.ChainID(), - } - - bz, err := marshalDelimitedFn(&cve) - require.NoError(t, err) - - extSig, err := privkey.Sign(bz) - require.NoError(t, err) - - reqPrepareProposal = abci.RequestPrepareProposal{ - MaxTxBytes: 1000, - Height: 3, // this value can't be 0 - LocalLastCommit: abci.ExtendedCommitInfo{ - Round: 0, - Votes: []abci.ExtendedVoteInfo{ - { - Validator: abci.Validator{ - Address: consAddr.Bytes(), - Power: 666, - }, - VoteExtension: ext, - ExtensionSignature: extSig, - BlockIdFlag: cmtproto.BlockIDFlagCommit, - }, - }, - }, - } - resPrepareProposal, err = suite.baseApp.PrepareProposal(&reqPrepareProposal) - require.NoError(t, err) - require.Equal(t, 1, len(resPrepareProposal.Txs)) - - // now vote extensions but our sole voter doesn't reach majority - reqPrepareProposal = abci.RequestPrepareProposal{ - MaxTxBytes: 1000, - Height: 3, // this value can't be 0 - LocalLastCommit: 
abci.ExtendedCommitInfo{ - Round: 0, - Votes: []abci.ExtendedVoteInfo{ - { - Validator: abci.Validator{ - Address: consAddr.Bytes(), - Power: 666, - }, - VoteExtension: ext, - ExtensionSignature: extSig, - BlockIdFlag: cmtproto.BlockIDFlagNil, // This will ignore the vote extension - }, - }, - }, - } - resPrepareProposal, err = suite.baseApp.PrepareProposal(&reqPrepareProposal) - require.NoError(t, err) - require.Equal(t, 0, len(resPrepareProposal.Txs)) -} - -func TestABCI_ProcessProposal_PanicRecovery(t *testing.T) { - processOpt := func(app *baseapp.BaseApp) { - app.SetProcessProposal(func(ctx sdk.Context, rpp *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { - panic(errors.New("test")) - }) - } - suite := NewBaseAppSuite(t, processOpt) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - require.NotPanics(t, func() { - res, err := suite.baseApp.ProcessProposal(&abci.RequestProcessProposal{Height: 1}) - require.NoError(t, err) - require.Equal(t, res.Status, abci.ResponseProcessProposal_REJECT) - }) -} - -// TestABCI_Proposal_Reset_State ensures that state is reset between runs of -// PrepareProposal and ProcessProposal in case they are called multiple times. -// This is only valid for heights > 1, given that on height 1 we always set the -// state to be deliverState. -func TestABCI_Proposal_Reset_State_Between_Calls(t *testing.T) { - someKey := []byte("some-key") - - prepareOpt := func(bapp *baseapp.BaseApp) { - bapp.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { - // This key should not exist given that we reset the state on every call. - require.False(t, ctx.KVStore(capKey1).Has(someKey)) - ctx.KVStore(capKey1).Set(someKey, someKey) - return &abci.ResponsePrepareProposal{Txs: req.Txs}, nil - }) - } - - processOpt := func(bapp *baseapp.BaseApp) { - bapp.SetProcessProposal(func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { - // This key should not exist given that we reset the state on every call. - require.False(t, ctx.KVStore(capKey1).Has(someKey)) - ctx.KVStore(capKey1).Set(someKey, someKey) - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil - }) - } - - suite := NewBaseAppSuite(t, prepareOpt, processOpt) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - reqPrepareProposal := abci.RequestPrepareProposal{ - MaxTxBytes: 1000, - Height: 2, // this value can't be 0 - } - - // Let's pretend something happened and PrepareProposal gets called many - // times, this must be safe to do. - for i := 0; i < 5; i++ { - resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) - require.NoError(t, err) - require.Equal(t, 0, len(resPrepareProposal.Txs)) - } - - reqProposalTxBytes := [][]byte{} - reqProcessProposal := abci.RequestProcessProposal{ - Txs: reqProposalTxBytes, - Height: 2, - } - - // Let's pretend something happened and ProcessProposal gets called many - // times, this must be safe to do. 
- for i := 0; i < 5; i++ { - resProcessProposal, err := suite.baseApp.ProcessProposal(&reqProcessProposal) - require.NoError(t, err) - require.Equal(t, abci.ResponseProcessProposal_ACCEPT, resProcessProposal.Status) - } -} - -func TestABCI_HaltChain(t *testing.T) { - testCases := []struct { - name string - haltHeight uint64 - haltTime uint64 - blockHeight int64 - blockTime int64 - expHalt bool - }{ - {"default", 0, 0, 10, 0, false}, - {"halt-height-edge", 10, 0, 10, 0, false}, - {"halt-height", 10, 0, 11, 0, true}, - {"halt-time-edge", 0, 10, 1, 10, false}, - {"halt-time", 0, 10, 1, 11, true}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - suite := NewBaseAppSuite(t, baseapp.SetHaltHeight(tc.haltHeight), baseapp.SetHaltTime(tc.haltTime)) - suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - InitialHeight: tc.blockHeight, - }) - - app := suite.baseApp - _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: tc.blockHeight, - Time: time.Unix(tc.blockTime, 0), - }) - if !tc.expHalt { - require.NoError(t, err) - } else { - require.Error(t, err) - require.True(t, strings.HasPrefix(err.Error(), "halt per configuration")) - } - }) - } -} - -func TestBaseApp_PreBlocker(t *testing.T) { - db := dbm.NewMemDB() - name := t.Name() - logger := log.NewTestLogger(t) - - app := baseapp.NewBaseApp(name, logger, db, nil) - _, err := app.InitChain(&abci.RequestInitChain{}) - require.NoError(t, err) - - wasHookCalled := false - app.SetPreBlocker(func(ctx sdk.Context, req *abci.RequestFinalizeBlock) (*sdk.ResponsePreBlock, error) { - wasHookCalled = true - return &sdk.ResponsePreBlock{ - ConsensusParamsChanged: true, - }, nil - }) - app.Seal() - - _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) - require.NoError(t, err) - require.Equal(t, true, wasHookCalled) - - // Now try erroring - app = baseapp.NewBaseApp(name, logger, db, nil) - _, err = app.InitChain(&abci.RequestInitChain{}) - require.NoError(t, err) - - app.SetPreBlocker(func(ctx sdk.Context, req *abci.RequestFinalizeBlock) (*sdk.ResponsePreBlock, error) { - return nil, errors.New("some error") - }) - app.Seal() - - _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) - require.Error(t, err) -} - -// TestBaseApp_VoteExtensions tests vote extensions using a price as an example. 
-func TestBaseApp_VoteExtensions(t *testing.T) { - ctrl := gomock.NewController(t) - valStore := mock.NewMockValidatorStore(ctrl) - - // for brevity and simplicity, all validators have the same key - privKey := secp256k1.GenPrivKey() - pubKey := privKey.PubKey() - tmPk := cmtprotocrypto.PublicKey{ - Sum: &cmtprotocrypto.PublicKey_Secp256K1{ - Secp256K1: pubKey.Bytes(), - }, - } - valStore.EXPECT().GetPubKeyByConsAddr(gomock.Any(), gomock.Any()).Return(tmPk, nil).AnyTimes() - - baseappOpts := func(app *baseapp.BaseApp) { - app.SetExtendVoteHandler(func(sdk.Context, *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) { - // here we would have a process to get the price from an external source - price := 10000000 + rand.Int63n(1000000) - ve := make([]byte, 8) - binary.BigEndian.PutUint64(ve, uint64(price)) - return &abci.ResponseExtendVote{VoteExtension: ve}, nil - }) - - app.SetVerifyVoteExtensionHandler(func(_ sdk.Context, req *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) { - vePrice := binary.BigEndian.Uint64(req.VoteExtension) - // here we would do some price validation, must not be 0 and not too high - if vePrice > 11000000 || vePrice == 0 { - // usually application should always return ACCEPT unless they really want to discard the entire vote - return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_REJECT}, nil - } - - return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil - }) - - app.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { - txs := [][]byte{} - if err := baseapp.ValidateVoteExtensions(ctx, valStore, req.Height, app.ChainID(), req.LocalLastCommit); err != nil { - return nil, err - } - // add all VE as txs (in a real scenario we would need to check signatures too) - for _, v := range req.LocalLastCommit.Votes { - if len(v.VoteExtension) == 8 { - // pretend this is a way to check if the VE is valid - if binary.BigEndian.Uint64(v.VoteExtension) < 11000000 && binary.BigEndian.Uint64(v.VoteExtension) > 0 { - txs = append(txs, v.VoteExtension) - } - } - } - - return &abci.ResponsePrepareProposal{Txs: txs}, nil - }) - - app.SetProcessProposal(func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { - // here we check if the proposal is valid, mainly if the vote extensions appended to the txs are valid - for _, v := range req.Txs { - // pretend this is a way to check if the tx is actually a VE - if len(v) == 8 { - // pretend this is a way to check if the VE is valid - if binary.BigEndian.Uint64(v) > 11000000 || binary.BigEndian.Uint64(v) == 0 { - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil - } - } - } - - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil - }) - - app.SetPreBlocker(func(ctx sdk.Context, req *abci.RequestFinalizeBlock) (*sdk.ResponsePreBlock, error) { - count := uint64(0) - pricesSum := uint64(0) - for _, v := range req.Txs { - // pretend this is a way to check if the tx is actually a VE - if len(v) == 8 { - count++ - pricesSum += binary.BigEndian.Uint64(v) - } - } - - if count > 0 { - // we process the average price and store it in the context to make it available for FinalizeBlock - avgPrice := pricesSum / count - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, avgPrice) - ctx.KVStore(capKey1).Set([]byte("avgPrice"), buf) - } - - return &sdk.ResponsePreBlock{ - 
ConsensusParamsChanged: true, - }, nil - }) - } - - suite := NewBaseAppSuite(t, baseappOpts) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{ - Abci: &cmtproto.ABCIParams{ - VoteExtensionsEnableHeight: 1, - }, - }, - }) - require.NoError(t, err) - - allVEs := [][]byte{} - // simulate getting 10 vote extensions from 10 validators - for i := 0; i < 10; i++ { - ve, err := suite.baseApp.ExtendVote(context.TODO(), &abci.RequestExtendVote{Height: 1}) - require.NoError(t, err) - allVEs = append(allVEs, ve.VoteExtension) - } - - // add a couple of invalid vote extensions (in what regards to the check we are doing in VerifyVoteExtension/ProcessProposal) - // add a 0 price - ve := make([]byte, 8) - binary.BigEndian.PutUint64(ve, uint64(0)) - allVEs = append(allVEs, ve) - - // add a price too high - ve = make([]byte, 8) - binary.BigEndian.PutUint64(ve, uint64(13000000)) - allVEs = append(allVEs, ve) - - // verify all votes, only 10 should be accepted - successful := 0 - for _, v := range allVEs { - res, err := suite.baseApp.VerifyVoteExtension(&abci.RequestVerifyVoteExtension{ - Height: 1, - VoteExtension: v, - }) - require.NoError(t, err) - if res.Status == abci.ResponseVerifyVoteExtension_ACCEPT { - successful++ - } - } - require.Equal(t, 10, successful) - - prepPropReq := &abci.RequestPrepareProposal{ - Height: 1, - LocalLastCommit: abci.ExtendedCommitInfo{ - Round: 0, - Votes: []abci.ExtendedVoteInfo{}, - }, - } - - // add all VEs to the local last commit, which will make PrepareProposal fail - // because it's not expecting to receive vote extensions when height == VoteExtensionsEnableHeight - for _, ve := range allVEs { - prepPropReq.LocalLastCommit.Votes = append(prepPropReq.LocalLastCommit.Votes, abci.ExtendedVoteInfo{ - VoteExtension: ve, - BlockIdFlag: cmtproto.BlockIDFlagCommit, - ExtensionSignature: []byte{}, // doesn't matter, it's just to make the next PrepareProposal fail - }) - } - resp, err := suite.baseApp.PrepareProposal(prepPropReq) - require.Len(t, resp.Txs, 0) // this is actually a failure, but we don't want to halt the chain - require.NoError(t, err) // we don't error here - - prepPropReq.LocalLastCommit.Votes = []abci.ExtendedVoteInfo{} // reset votes - resp, err = suite.baseApp.PrepareProposal(prepPropReq) - require.NoError(t, err) - require.Len(t, resp.Txs, 0) - - procPropRes, err := suite.baseApp.ProcessProposal(&abci.RequestProcessProposal{Height: 1, Txs: resp.Txs}) - require.NoError(t, err) - require.Equal(t, abci.ResponseProcessProposal_ACCEPT, procPropRes.Status) - - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: resp.Txs}) - require.NoError(t, err) - - // The average price will be nil during the first block, given that we don't have - // any vote extensions on block 1 in PrepareProposal - avgPrice := getFinalizeBlockStateCtx(suite.baseApp).KVStore(capKey1).Get([]byte("avgPrice")) - require.Nil(t, avgPrice) - _, err = suite.baseApp.Commit() - require.NoError(t, err) - - // Now onto the second block, this time we process vote extensions from the - // previous block (which we sign now) - for _, ve := range allVEs { - cve := cmtproto.CanonicalVoteExtension{ - Extension: ve, - Height: 1, - Round: int64(0), - ChainId: suite.baseApp.ChainID(), - } - - bz, err := marshalDelimitedFn(&cve) - require.NoError(t, err) - - extSig, err := privKey.Sign(bz) - require.NoError(t, err) - - prepPropReq.LocalLastCommit.Votes = append(prepPropReq.LocalLastCommit.Votes, abci.ExtendedVoteInfo{ - 
VoteExtension: ve, - BlockIdFlag: cmtproto.BlockIDFlagCommit, - ExtensionSignature: extSig, - }) - } - - prepPropReq.Height = 2 - resp, err = suite.baseApp.PrepareProposal(prepPropReq) - require.NoError(t, err) - require.Len(t, resp.Txs, 10) - - procPropRes, err = suite.baseApp.ProcessProposal(&abci.RequestProcessProposal{Height: 2, Txs: resp.Txs}) - require.NoError(t, err) - require.Equal(t, abci.ResponseProcessProposal_ACCEPT, procPropRes.Status) - - _, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2, Txs: resp.Txs}) - require.NoError(t, err) - - // Check if the average price was available in FinalizeBlock's context - avgPrice = getFinalizeBlockStateCtx(suite.baseApp).KVStore(capKey1).Get([]byte("avgPrice")) - require.NotNil(t, avgPrice) - require.GreaterOrEqual(t, binary.BigEndian.Uint64(avgPrice), uint64(10000000)) - require.Less(t, binary.BigEndian.Uint64(avgPrice), uint64(11000000)) - - _, err = suite.baseApp.Commit() - require.NoError(t, err) - - // check if avgPrice was committed - committedAvgPrice := suite.baseApp.NewContext(true).KVStore(capKey1).Get([]byte("avgPrice")) - require.Equal(t, avgPrice, committedAvgPrice) -} - -func TestABCI_PrepareProposal_Panic(t *testing.T) { - prepareOpt := func(bapp *baseapp.BaseApp) { - bapp.SetPrepareProposal(func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { - if len(req.Txs) == 3 { - panic("i don't like number 3, panic") - } - // return empty if no panic - return &abci.ResponsePrepareProposal{}, nil - }) - } - - suite := NewBaseAppSuite(t, prepareOpt) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - InitialHeight: 1, - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - txs := [][]byte{{1}, {2}} - reqPrepareProposal := abci.RequestPrepareProposal{ - MaxTxBytes: 1000, - Height: 1, // this value can't be 0 - Txs: txs, - } - resPrepareProposal, err := suite.baseApp.PrepareProposal(&reqPrepareProposal) - require.NoError(t, err) - require.Equal(t, 0, len(resPrepareProposal.Txs)) - - // make it panic, and check if it returns 3 txs (because of panic recovery) - txs = [][]byte{{1}, {2}, {3}} - reqPrepareProposal.Txs = txs - resPrepareProposal, err = suite.baseApp.PrepareProposal(&reqPrepareProposal) - require.NoError(t, err) - require.Equal(t, 3, len(resPrepareProposal.Txs)) -} - -func TestOptimisticExecution(t *testing.T) { - suite := NewBaseAppSuite(t, baseapp.SetOptimisticExecution()) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - // run 50 blocks - for i := 0; i < 50; i++ { - tx := newTxCounter(t, suite.txConfig, 0, 1) - txBytes, err := suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - - reqProcProp := abci.RequestProcessProposal{ - Txs: [][]byte{txBytes}, - Height: suite.baseApp.LastBlockHeight() + 1, - Hash: []byte("some-hash" + strconv.FormatInt(suite.baseApp.LastBlockHeight()+1, 10)), - } - - respProcProp, err := suite.baseApp.ProcessProposal(&reqProcProp) - require.Equal(t, abci.ResponseProcessProposal_ACCEPT, respProcProp.Status) - require.NoError(t, err) - - reqFinalizeBlock := abci.RequestFinalizeBlock{ - Height: reqProcProp.Height, - Txs: reqProcProp.Txs, - Hash: reqProcProp.Hash, - } - - respFinalizeBlock, err := suite.baseApp.FinalizeBlock(&reqFinalizeBlock) - require.NoError(t, err) - require.Len(t, respFinalizeBlock.TxResults, 1) - - _, err = suite.baseApp.Commit() - require.NoError(t, err) - } - - require.Equal(t, 
int64(50), suite.baseApp.LastBlockHeight()) -} diff --git a/baseapp/abci_utils.go b/baseapp/abci_utils.go deleted file mode 100644 index 0b570ab52a..0000000000 --- a/baseapp/abci_utils.go +++ /dev/null @@ -1,398 +0,0 @@ -package baseapp - -import ( - "bytes" - "context" - "fmt" - - "github.com/cockroachdb/errors" - abci "github.com/cometbft/cometbft/abci/types" - cryptoenc "github.com/cometbft/cometbft/crypto/encoding" - cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - protoio "github.com/cosmos/gogoproto/io" - "github.com/cosmos/gogoproto/proto" - - "cosmossdk.io/math" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/mempool" -) - -// VoteExtensionThreshold defines the total voting power % that must be -// submitted in order for all vote extensions to be considered valid for a -// given height. -var VoteExtensionThreshold = math.LegacyNewDecWithPrec(667, 3) - -type ( - // ValidatorStore defines the interface contract require for verifying vote - // extension signatures. Typically, this will be implemented by the x/staking - // module, which has knowledge of the CometBFT public key. - ValidatorStore interface { - GetPubKeyByConsAddr(context.Context, sdk.ConsAddress) (cmtprotocrypto.PublicKey, error) - } - - // GasTx defines the contract that a transaction with a gas limit must implement. - GasTx interface { - GetGas() uint64 - } -) - -// ValidateVoteExtensions defines a helper function for verifying vote extension -// signatures that may be passed or manually injected into a block proposal from -// a proposer in PrepareProposal. It returns an error if any signature is invalid -// or if unexpected vote extensions and/or signatures are found or less than 2/3 -// power is received. -func ValidateVoteExtensions( - ctx sdk.Context, - valStore ValidatorStore, - currentHeight int64, - chainID string, - extCommit abci.ExtendedCommitInfo, -) error { - cp := ctx.ConsensusParams() - // Start checking vote extensions only **after** the vote extensions enable - // height, because when `currentHeight == VoteExtensionsEnableHeight` - // PrepareProposal doesn't get any vote extensions in its request. - extsEnabled := cp.Abci != nil && currentHeight > cp.Abci.VoteExtensionsEnableHeight && cp.Abci.VoteExtensionsEnableHeight != 0 - marshalDelimitedFn := func(msg proto.Message) ([]byte, error) { - var buf bytes.Buffer - if err := protoio.NewDelimitedWriter(&buf).WriteMsg(msg); err != nil { - return nil, err - } - - return buf.Bytes(), nil - } - - var ( - // Total voting power of all vote extensions. - totalVP int64 - // Total voting power of all validators that submitted valid vote extensions. - sumVP int64 - ) - - for _, vote := range extCommit.Votes { - totalVP += vote.Validator.Power - - // Only check + include power if the vote is a commit vote. There must be super-majority, otherwise the - // previous block (the block vote is for) could not have been committed. 
- if vote.BlockIdFlag != cmtproto.BlockIDFlagCommit { - continue - } - - if !extsEnabled { - if len(vote.VoteExtension) > 0 { - return fmt.Errorf("vote extensions disabled; received non-empty vote extension at height %d", currentHeight) - } - if len(vote.ExtensionSignature) > 0 { - return fmt.Errorf("vote extensions disabled; received non-empty vote extension signature at height %d", currentHeight) - } - - continue - } - - if len(vote.ExtensionSignature) == 0 { - return fmt.Errorf("vote extensions enabled; received empty vote extension signature at height %d", currentHeight) - } - - valConsAddr := sdk.ConsAddress(vote.Validator.Address) - pubKeyProto, err := valStore.GetPubKeyByConsAddr(ctx, valConsAddr) - if err != nil { - return fmt.Errorf("failed to get validator %X public key: %w", valConsAddr, err) - } - - cmtPubKey, err := cryptoenc.PubKeyFromProto(pubKeyProto) - if err != nil { - return fmt.Errorf("failed to convert validator %X public key: %w", valConsAddr, err) - } - - cve := cmtproto.CanonicalVoteExtension{ - Extension: vote.VoteExtension, - Height: currentHeight - 1, // the vote extension was signed in the previous height - Round: int64(extCommit.Round), - ChainId: chainID, - } - - extSignBytes, err := marshalDelimitedFn(&cve) - if err != nil { - return fmt.Errorf("failed to encode CanonicalVoteExtension: %w", err) - } - - if !cmtPubKey.VerifySignature(extSignBytes, vote.ExtensionSignature) { - return fmt.Errorf("failed to verify validator %X vote extension signature", valConsAddr) - } - - sumVP += vote.Validator.Power - } - - if totalVP > 0 { - percentSubmitted := math.LegacyNewDecFromInt(math.NewInt(sumVP)).Quo(math.LegacyNewDecFromInt(math.NewInt(totalVP))) - if percentSubmitted.LT(VoteExtensionThreshold) { - return fmt.Errorf("insufficient cumulative voting power received to verify vote extensions; got: %s, expected: >=%s", percentSubmitted, VoteExtensionThreshold) - } - } - - return nil -} - -type ( - // ProposalTxVerifier defines the interface that is implemented by BaseApp, - // that any custom ABCI PrepareProposal and ProcessProposal handler can use - // to verify a transaction. - ProposalTxVerifier interface { - PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error) - ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error) - TxDecode(txBz []byte) (sdk.Tx, error) - TxEncode(tx sdk.Tx) ([]byte, error) - } - - // DefaultProposalHandler defines the default ABCI PrepareProposal and - // ProcessProposal handlers. - DefaultProposalHandler struct { - mempool mempool.Mempool - txVerifier ProposalTxVerifier - txSelector TxSelector - } -) - -func NewDefaultProposalHandler(mp mempool.Mempool, txVerifier ProposalTxVerifier) *DefaultProposalHandler { - return &DefaultProposalHandler{ - mempool: mp, - txVerifier: txVerifier, - txSelector: NewDefaultTxSelector(), - } -} - -// SetTxSelector sets the TxSelector function on the DefaultProposalHandler. -func (h *DefaultProposalHandler) SetTxSelector(ts TxSelector) { - h.txSelector = ts -} - -// PrepareProposalHandler returns the default implementation for processing an -// ABCI proposal. The application's mempool is enumerated and all valid -// transactions are added to the proposal. Transactions are valid if they: -// -// 1) Successfully encode to bytes. -// 2) Are valid (i.e. pass runTx, AnteHandler only). -// -// Enumeration is halted once RequestPrepareProposal.MaxBytes of transactions is -// reached or the mempool is exhausted. -// -// Note: -// -// - Step (2) is identical to the validation step performed in -// DefaultProcessProposal. 
It is very important that the same validation logic -// is used in both steps, and applications must ensure that this is the case in -// non-default handlers. -// -// - If no mempool is set or if the mempool is a no-op mempool, the transactions -// requested from CometBFT will simply be returned, which, by default, are in -// FIFO order. -func (h *DefaultProposalHandler) PrepareProposalHandler() sdk.PrepareProposalHandler { - return func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { - var maxBlockGas uint64 - if b := ctx.ConsensusParams().Block; b != nil { - maxBlockGas = uint64(b.MaxGas) - } - - defer h.txSelector.Clear() - - // If the mempool is nil or NoOp we simply return the transactions - // requested from CometBFT, which, by default, should be in FIFO order. - // - // Note, we still need to ensure the transactions returned respect req.MaxTxBytes. - _, isNoOp := h.mempool.(mempool.NoOpMempool) - if h.mempool == nil || isNoOp { - for _, txBz := range req.Txs { - tx, err := h.txVerifier.TxDecode(txBz) - if err != nil { - return nil, err - } - - stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, tx, txBz) - if stop { - break - } - } - - return &abci.ResponsePrepareProposal{Txs: h.txSelector.SelectedTxs(ctx)}, nil - } - - iterator := h.mempool.Select(ctx, req.Txs) - for iterator != nil { - memTx := iterator.Tx() - - // NOTE: Since transaction verification was already executed in CheckTx, - // which calls mempool.Insert, in theory everything in the pool should be - // valid. But some mempool implementations may insert invalid txs, so we - // check again. - txBz, err := h.txVerifier.PrepareProposalVerifyTx(memTx) - if err != nil { - err := h.mempool.Remove(memTx) - if err != nil && !errors.Is(err, mempool.ErrTxNotFound) { - return nil, err - } - } else { - stop := h.txSelector.SelectTxForProposal(ctx, uint64(req.MaxTxBytes), maxBlockGas, memTx, txBz) - if stop { - break - } - } - - iterator = iterator.Next() - } - - return &abci.ResponsePrepareProposal{Txs: h.txSelector.SelectedTxs(ctx)}, nil - } -} - -// ProcessProposalHandler returns the default implementation for processing an -// ABCI proposal. Every transaction in the proposal must pass 2 conditions: -// -// 1. The transaction bytes must decode to a valid transaction. -// 2. The transaction must be valid (i.e. pass runTx, AnteHandler only) -// -// If any transaction fails to pass either condition, the proposal is rejected. -// Note that step (2) is identical to the validation step performed in -// DefaultPrepareProposal. It is very important that the same validation logic -// is used in both steps, and applications must ensure that this is the case in -// non-default handlers. -func (h *DefaultProposalHandler) ProcessProposalHandler() sdk.ProcessProposalHandler { - // If the mempool is nil or NoOp we simply return ACCEPT, - // because PrepareProposal may have included txs that could fail verification. 
- _, isNoOp := h.mempool.(mempool.NoOpMempool) - if h.mempool == nil || isNoOp { - return NoOpProcessProposal() - } - - return func(ctx sdk.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { - var totalTxGas uint64 - - var maxBlockGas int64 - if b := ctx.ConsensusParams().Block; b != nil { - maxBlockGas = b.MaxGas - } - - for _, txBytes := range req.Txs { - tx, err := h.txVerifier.ProcessProposalVerifyTx(txBytes) - if err != nil { - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil - } - - if maxBlockGas > 0 { - gasTx, ok := tx.(GasTx) - if ok { - totalTxGas += gasTx.GetGas() - } - - if totalTxGas > uint64(maxBlockGas) { - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_REJECT}, nil - } - } - } - - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil - } -} - -// NoOpPrepareProposal defines a no-op PrepareProposal handler. It will always -// return the transactions sent by the client's request. -func NoOpPrepareProposal() sdk.PrepareProposalHandler { - return func(_ sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) { - return &abci.ResponsePrepareProposal{Txs: req.Txs}, nil - } -} - -// NoOpProcessProposal defines a no-op ProcessProposal Handler. It will always -// return ACCEPT. -func NoOpProcessProposal() sdk.ProcessProposalHandler { - return func(_ sdk.Context, _ *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { - return &abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}, nil - } -} - -// NoOpExtendVote defines a no-op ExtendVote handler. It will always return an -// empty byte slice as the vote extension. -func NoOpExtendVote() sdk.ExtendVoteHandler { - return func(_ sdk.Context, _ *abci.RequestExtendVote) (*abci.ResponseExtendVote, error) { - return &abci.ResponseExtendVote{VoteExtension: []byte{}}, nil - } -} - -// NoOpVerifyVoteExtensionHandler defines a no-op VerifyVoteExtension handler. It -// will always return an ACCEPT status with no error. -func NoOpVerifyVoteExtensionHandler() sdk.VerifyVoteExtensionHandler { - return func(_ sdk.Context, _ *abci.RequestVerifyVoteExtension) (*abci.ResponseVerifyVoteExtension, error) { - return &abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil - } -} - -// TxSelector defines a helper type that assists in selecting transactions during -// mempool transaction selection in PrepareProposal. It keeps track of the total -// number of bytes and total gas of the selected transactions. It also keeps -// track of the selected transactions themselves. -type TxSelector interface { - // SelectedTxs should return a copy of the selected transactions. - SelectedTxs(ctx context.Context) [][]byte - - // Clear should clear the TxSelector, nulling out all relevant fields. - Clear() - - // SelectTxForProposal should attempt to select a transaction for inclusion in - // a proposal based on inclusion criteria defined by the TxSelector. It must - // return if the caller should halt the transaction selection loop - // (typically over a mempool) or otherwise. 
- SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool -} - -type defaultTxSelector struct { - totalTxBytes uint64 - totalTxGas uint64 - selectedTxs [][]byte -} - -func NewDefaultTxSelector() TxSelector { - return &defaultTxSelector{} -} - -func (ts *defaultTxSelector) SelectedTxs(_ context.Context) [][]byte { - txs := make([][]byte, len(ts.selectedTxs)) - copy(txs, ts.selectedTxs) - return txs -} - -func (ts *defaultTxSelector) Clear() { - ts.totalTxBytes = 0 - ts.totalTxGas = 0 - ts.selectedTxs = nil -} - -func (ts *defaultTxSelector) SelectTxForProposal(_ context.Context, maxTxBytes, maxBlockGas uint64, memTx sdk.Tx, txBz []byte) bool { - txSize := uint64(len(txBz)) - - var txGasLimit uint64 - if memTx != nil { - if gasTx, ok := memTx.(GasTx); ok { - txGasLimit = gasTx.GetGas() - } - } - - // only add the transaction to the proposal if we have enough capacity - if (txSize + ts.totalTxBytes) <= maxTxBytes { - // If there is a max block gas limit, add the tx only if the limit has - // not been met. - if maxBlockGas > 0 { - if (txGasLimit + ts.totalTxGas) <= maxBlockGas { - ts.totalTxGas += txGasLimit - ts.totalTxBytes += txSize - ts.selectedTxs = append(ts.selectedTxs, txBz) - } - } else { - ts.totalTxBytes += txSize - ts.selectedTxs = append(ts.selectedTxs, txBz) - } - } - - // check if we've reached capacity; if so, we cannot select any more transactions - return ts.totalTxBytes >= maxTxBytes || (maxBlockGas > 0 && (ts.totalTxGas >= maxBlockGas)) -} diff --git a/baseapp/abci_utils_test.go b/baseapp/abci_utils_test.go deleted file mode 100644 index 8919ee81ba..0000000000 --- a/baseapp/abci_utils_test.go +++ /dev/null @@ -1,379 +0,0 @@ -package baseapp_test - -import ( - "bytes" - "testing" - - abci "github.com/cometbft/cometbft/abci/types" - "github.com/cometbft/cometbft/crypto/secp256k1" - cmtprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - dbm "github.com/cosmos/cosmos-db" - protoio "github.com/cosmos/gogoproto/io" - "github.com/cosmos/gogoproto/proto" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/suite" - - "cosmossdk.io/log" - - "github.com/cosmos/cosmos-sdk/baseapp" - baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" - "github.com/cosmos/cosmos-sdk/baseapp/testutil/mock" - codectestutil "github.com/cosmos/cosmos-sdk/codec/testutil" - "github.com/cosmos/cosmos-sdk/testutil/testdata" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/mempool" - authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" -) - -const ( - chainID = "chain-id" -) - -type testValidator struct { - consAddr sdk.ConsAddress - tmPk cmtprotocrypto.PublicKey - privKey secp256k1.PrivKey -} - -func newTestValidator() testValidator { - privkey := secp256k1.GenPrivKey() - pubkey := privkey.PubKey() - tmPk := cmtprotocrypto.PublicKey{ - Sum: &cmtprotocrypto.PublicKey_Secp256K1{ - Secp256K1: pubkey.Bytes(), - }, - } - - return testValidator{ - consAddr: sdk.ConsAddress(pubkey.Address()), - tmPk: tmPk, - privKey: privkey, - } -} - -func (t testValidator) toValidator(power int64) abci.Validator { - return abci.Validator{ - Address: t.consAddr.Bytes(), - Power: power, - } -} - -type ABCIUtilsTestSuite struct { - suite.Suite - - valStore *mock.MockValidatorStore - vals [3]testValidator - ctx sdk.Context -} - -func NewABCIUtilsTestSuite(t *testing.T) *ABCIUtilsTestSuite { - t.Helper() - // create 3 validators - s := &ABCIUtilsTestSuite{ - 
vals: [3]testValidator{ - newTestValidator(), - newTestValidator(), - newTestValidator(), - }, - } - - // create mock - ctrl := gomock.NewController(t) - valStore := mock.NewMockValidatorStore(ctrl) - s.valStore = valStore - - // set up mock - s.valStore.EXPECT().GetPubKeyByConsAddr(gomock.Any(), s.vals[0].consAddr.Bytes()).Return(s.vals[0].tmPk, nil).AnyTimes() - s.valStore.EXPECT().GetPubKeyByConsAddr(gomock.Any(), s.vals[1].consAddr.Bytes()).Return(s.vals[1].tmPk, nil).AnyTimes() - s.valStore.EXPECT().GetPubKeyByConsAddr(gomock.Any(), s.vals[2].consAddr.Bytes()).Return(s.vals[2].tmPk, nil).AnyTimes() - - // create context - s.ctx = sdk.Context{}.WithConsensusParams(cmtproto.ConsensusParams{ - Abci: &cmtproto.ABCIParams{ - VoteExtensionsEnableHeight: 2, - }, - }) - return s -} - -func TestABCIUtilsTestSuite(t *testing.T) { - suite.Run(t, NewABCIUtilsTestSuite(t)) -} - -// check ValidateVoteExtensions works when all nodes have CommitBlockID votes -func (s *ABCIUtilsTestSuite) TestValidateVoteExtensionsHappyPath() { - ext := []byte("vote-extension") - cve := cmtproto.CanonicalVoteExtension{ - Extension: ext, - Height: 2, - Round: int64(0), - ChainId: chainID, - } - - bz, err := marshalDelimitedFn(&cve) - s.Require().NoError(err) - - extSig0, err := s.vals[0].privKey.Sign(bz) - s.Require().NoError(err) - - extSig1, err := s.vals[1].privKey.Sign(bz) - s.Require().NoError(err) - - extSig2, err := s.vals[2].privKey.Sign(bz) - s.Require().NoError(err) - - llc := abci.ExtendedCommitInfo{ - Round: 0, - Votes: []abci.ExtendedVoteInfo{ - { - Validator: s.vals[0].toValidator(333), - VoteExtension: ext, - ExtensionSignature: extSig0, - BlockIdFlag: cmtproto.BlockIDFlagCommit, - }, - { - Validator: s.vals[1].toValidator(333), - VoteExtension: ext, - ExtensionSignature: extSig1, - BlockIdFlag: cmtproto.BlockIDFlagCommit, - }, - { - Validator: s.vals[2].toValidator(334), - VoteExtension: ext, - ExtensionSignature: extSig2, - BlockIdFlag: cmtproto.BlockIDFlagCommit, - }, - }, - } - // expect-pass (votes of height 2 are included in next block) - s.Require().NoError(baseapp.ValidateVoteExtensions(s.ctx, s.valStore, 3, chainID, llc)) -} - -// check ValidateVoteExtensions works when a single node has submitted a BlockID_Absent -func (s *ABCIUtilsTestSuite) TestValidateVoteExtensionsSingleVoteAbsent() { - ext := []byte("vote-extension") - cve := cmtproto.CanonicalVoteExtension{ - Extension: ext, - Height: 2, - Round: int64(0), - ChainId: chainID, - } - - bz, err := marshalDelimitedFn(&cve) - s.Require().NoError(err) - - extSig0, err := s.vals[0].privKey.Sign(bz) - s.Require().NoError(err) - - extSig2, err := s.vals[2].privKey.Sign(bz) - s.Require().NoError(err) - - llc := abci.ExtendedCommitInfo{ - Round: 0, - Votes: []abci.ExtendedVoteInfo{ - { - Validator: s.vals[0].toValidator(333), - VoteExtension: ext, - ExtensionSignature: extSig0, - BlockIdFlag: cmtproto.BlockIDFlagCommit, - }, - // validator of power <1/3 is missing, so commit-info shld still be valid - { - Validator: s.vals[1].toValidator(333), - BlockIdFlag: cmtproto.BlockIDFlagAbsent, - }, - { - Validator: s.vals[2].toValidator(334), - VoteExtension: ext, - ExtensionSignature: extSig2, - BlockIdFlag: cmtproto.BlockIDFlagCommit, - }, - }, - } - // expect-pass (votes of height 2 are included in next block) - s.Require().NoError(baseapp.ValidateVoteExtensions(s.ctx, s.valStore, 3, chainID, llc)) -} - -// check ValidateVoteExtensions works when a single node has submitted a BlockID_Nil -func (s *ABCIUtilsTestSuite) 
TestValidateVoteExtensionsSingleVoteNil() { - ext := []byte("vote-extension") - cve := cmtproto.CanonicalVoteExtension{ - Extension: ext, - Height: 2, - Round: int64(0), - ChainId: chainID, - } - - bz, err := marshalDelimitedFn(&cve) - s.Require().NoError(err) - - extSig0, err := s.vals[0].privKey.Sign(bz) - s.Require().NoError(err) - - extSig2, err := s.vals[2].privKey.Sign(bz) - s.Require().NoError(err) - - llc := abci.ExtendedCommitInfo{ - Round: 0, - Votes: []abci.ExtendedVoteInfo{ - { - Validator: s.vals[0].toValidator(333), - VoteExtension: ext, - ExtensionSignature: extSig0, - BlockIdFlag: cmtproto.BlockIDFlagCommit, - }, - // validator of power <1/3 is missing, so commit-info should still be valid - { - Validator: s.vals[1].toValidator(333), - BlockIdFlag: cmtproto.BlockIDFlagNil, - }, - { - Validator: s.vals[2].toValidator(334), - VoteExtension: ext, - ExtensionSignature: extSig2, - BlockIdFlag: cmtproto.BlockIDFlagCommit, - }, - }, - } - // expect-pass (votes of height 2 are included in next block) - s.Require().NoError(baseapp.ValidateVoteExtensions(s.ctx, s.valStore, 3, chainID, llc)) -} - -// check ValidateVoteExtensions works when two nodes have submitted a BlockID_Nil / BlockID_Absent -func (s *ABCIUtilsTestSuite) TestValidateVoteExtensionsTwoVotesNilAbsent() { - ext := []byte("vote-extension") - cve := cmtproto.CanonicalVoteExtension{ - Extension: ext, - Height: 2, - Round: int64(0), - ChainId: chainID, - } - - bz, err := marshalDelimitedFn(&cve) - s.Require().NoError(err) - - extSig0, err := s.vals[0].privKey.Sign(bz) - s.Require().NoError(err) - - llc := abci.ExtendedCommitInfo{ - Round: 0, - Votes: []abci.ExtendedVoteInfo{ - // validator of power >2/3 is missing, so commit-info should not be valid - { - Validator: s.vals[0].toValidator(333), - BlockIdFlag: cmtproto.BlockIDFlagCommit, - VoteExtension: ext, - ExtensionSignature: extSig0, - }, - { - Validator: s.vals[1].toValidator(333), - BlockIdFlag: cmtproto.BlockIDFlagNil, - }, - { - Validator: s.vals[2].toValidator(334), - VoteExtension: ext, - BlockIdFlag: cmtproto.BlockIDFlagAbsent, - }, - }, - } - - // expect-pass (votes of height 2 are included in next block) - s.Require().Error(baseapp.ValidateVoteExtensions(s.ctx, s.valStore, 3, chainID, llc)) -} - -func (s *ABCIUtilsTestSuite) TestDefaultProposalHandler_NoOpMempoolTxSelection() { - // create a codec for marshaling - cdc := codectestutil.CodecOptions{}.NewCodec() - baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) - - // create a baseapp along with a tx config for tx generation - txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) - app := baseapp.NewBaseApp(s.T().Name(), log.NewNopLogger(), dbm.NewMemDB(), txConfig.TxDecoder()) - - // create a proposal handler - ph := baseapp.NewDefaultProposalHandler(mempool.NoOpMempool{}, app) - handler := ph.PrepareProposalHandler() - - // build a tx - _, _, addr := testdata.KeyTestPubAddr() - builder := txConfig.NewTxBuilder() - s.Require().NoError(builder.SetMsgs( - &baseapptestutil.MsgCounter{Counter: 0, FailOnHandler: false, Signer: addr.String()}, - )) - builder.SetGasLimit(100) - setTxSignature(s.T(), builder, 0) - - // encode the tx to be used in the proposal request - tx := builder.GetTx() - txBz, err := txConfig.TxEncoder()(tx) - s.Require().NoError(err) - s.Require().Len(txBz, 152) - - testCases := map[string]struct { - ctx sdk.Context - req *abci.RequestPrepareProposal - expectedTxs int - }{ - "small max tx bytes": { - ctx: s.ctx, - req: &abci.RequestPrepareProposal{ - Txs: [][]byte{txBz, txBz, 
txBz, txBz, txBz}, - MaxTxBytes: 10, - }, - expectedTxs: 0, - }, - "small max gas": { - ctx: s.ctx.WithConsensusParams(cmtproto.ConsensusParams{ - Block: &cmtproto.BlockParams{ - MaxGas: 10, - }, - }), - req: &abci.RequestPrepareProposal{ - Txs: [][]byte{txBz, txBz, txBz, txBz, txBz}, - MaxTxBytes: 456, - }, - expectedTxs: 0, - }, - "large max tx bytes": { - ctx: s.ctx, - req: &abci.RequestPrepareProposal{ - Txs: [][]byte{txBz, txBz, txBz, txBz, txBz}, - MaxTxBytes: 456, - }, - expectedTxs: 3, - }, - "max gas and tx bytes": { - ctx: s.ctx.WithConsensusParams(cmtproto.ConsensusParams{ - Block: &cmtproto.BlockParams{ - MaxGas: 200, - }, - }), - req: &abci.RequestPrepareProposal{ - Txs: [][]byte{txBz, txBz, txBz, txBz, txBz}, - MaxTxBytes: 456, - }, - expectedTxs: 2, - }, - } - - for name, tc := range testCases { - s.Run(name, func() { - // iterate multiple times to ensure the tx selector is cleared each time - for i := 0; i < 5; i++ { - resp, err := handler(tc.ctx, tc.req) - s.Require().NoError(err) - s.Require().Len(resp.Txs, tc.expectedTxs) - } - }) - } -} - -func marshalDelimitedFn(msg proto.Message) ([]byte, error) { - var buf bytes.Buffer - if err := protoio.NewDelimitedWriter(&buf).WriteMsg(msg); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go deleted file mode 100644 index 913481f26e..0000000000 --- a/baseapp/baseapp.go +++ /dev/null @@ -1,1131 +0,0 @@ -package baseapp - -import ( - "context" - "fmt" - "math" - "sort" - "strconv" - - "github.com/cockroachdb/errors" - abci "github.com/cometbft/cometbft/abci/types" - "github.com/cometbft/cometbft/crypto/tmhash" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - dbm "github.com/cosmos/cosmos-db" - "github.com/cosmos/gogoproto/proto" - "golang.org/x/exp/maps" - protov2 "google.golang.org/protobuf/proto" - - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/log" - "cosmossdk.io/store" - storemetrics "cosmossdk.io/store/metrics" - "cosmossdk.io/store/snapshots" - storetypes "cosmossdk.io/store/types" - - "github.com/cosmos/cosmos-sdk/baseapp/oe" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" - servertypes "github.com/cosmos/cosmos-sdk/server/types" - "github.com/cosmos/cosmos-sdk/telemetry" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/cosmos/cosmos-sdk/types/mempool" -) - -type ( - execMode uint8 - - // StoreLoader defines a customizable function to control how we load the - // CommitMultiStore from disk. This is useful for state migration, when - // loading a datastore written with an older version of the software. In - // particular, if a module changed the substore key name (or removed a substore) - // between two versions of the software. - StoreLoader func(ms storetypes.CommitMultiStore) error -) - -const ( - execModeCheck execMode = iota // Check a transaction - execModeReCheck // Recheck a (pending) transaction after a commit - execModeSimulate // Simulate a transaction - execModePrepareProposal // Prepare a block proposal - execModeProcessProposal // Process a block proposal - execModeVoteExtension // Extend or verify a pre-commit vote - execModeFinalize // Finalize a block proposal -) - -var _ servertypes.ABCI = (*BaseApp)(nil) - -// BaseApp reflects the ABCI application implementation. 
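The StoreLoader type defined above is the hook an application uses to rename or migrate substores before the multistore is loaded. The following is a minimal sketch, assuming CommitMultiStore still exposes LoadLatestVersionAndUpgrade and using a hypothetical rename of an "oldbank" substore to "bank"; it would be wired in through baseapp.SetStoreLoader.

package app

import (
	storetypes "cosmossdk.io/store/types"

	"github.com/cosmos/cosmos-sdk/baseapp"
)

// renameStoreLoader renames the hypothetical "oldbank" substore to "bank"
// before loading the latest committed version of the multistore.
func renameStoreLoader() baseapp.StoreLoader {
	upgrades := &storetypes.StoreUpgrades{
		Renamed: []storetypes.StoreRename{{OldKey: "oldbank", NewKey: "bank"}},
	}
	return func(ms storetypes.CommitMultiStore) error {
		return ms.LoadLatestVersionAndUpgrade(upgrades)
	}
}

// Wiring (hypothetical constructor call):
// baseapp.NewBaseApp(name, logger, db, txDecoder, baseapp.SetStoreLoader(renameStoreLoader()))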
-type BaseApp struct { - // initialized on creation - logger log.Logger - name string // application name from abci.BlockInfo - db dbm.DB // common DB backend - cms storetypes.CommitMultiStore // Main (uncached) state - qms storetypes.MultiStore // Optional alternative multistore for querying only. - storeLoader StoreLoader // function to handle store loading, may be overridden with SetStoreLoader() - grpcQueryRouter *GRPCQueryRouter // router for redirecting gRPC query calls - msgServiceRouter *MsgServiceRouter // router for redirecting Msg service messages - interfaceRegistry codectypes.InterfaceRegistry - txDecoder sdk.TxDecoder // unmarshal []byte into sdk.Tx - txEncoder sdk.TxEncoder // marshal sdk.Tx into []byte - - mempool mempool.Mempool // application side mempool - anteHandler sdk.AnteHandler // ante handler for fee and auth - postHandler sdk.PostHandler // post handler, optional - - initChainer sdk.InitChainer // ABCI InitChain handler - preBlocker sdk.PreBlocker // logic to run before BeginBlocker - beginBlocker sdk.BeginBlocker // (legacy ABCI) BeginBlock handler - endBlocker sdk.EndBlocker // (legacy ABCI) EndBlock handler - processProposal sdk.ProcessProposalHandler // ABCI ProcessProposal handler - prepareProposal sdk.PrepareProposalHandler // ABCI PrepareProposal - extendVote sdk.ExtendVoteHandler // ABCI ExtendVote handler - verifyVoteExt sdk.VerifyVoteExtensionHandler // ABCI VerifyVoteExtension handler - prepareCheckStater sdk.PrepareCheckStater // logic to run during commit using the checkState - precommiter sdk.Precommiter // logic to run during commit using the deliverState - - addrPeerFilter sdk.PeerFilter // filter peers by address and port - idPeerFilter sdk.PeerFilter // filter peers by node ID - fauxMerkleMode bool // if true, IAVL MountStores uses MountStoresDB for simulation speed. - - // manages snapshots, i.e. dumps of app state at certain intervals - snapshotManager *snapshots.Manager - - // volatile states: - // - // - checkState is set on InitChain and reset on Commit - // - finalizeBlockState is set on InitChain and FinalizeBlock and set to nil - // on Commit. - // - // - checkState: Used for CheckTx, which is set based on the previous block's - // state. This state is never committed. - // - // - prepareProposalState: Used for PrepareProposal, which is set based on the - // previous block's state. This state is never committed. In case of multiple - // consensus rounds, the state is always reset to the previous block's state. - // - // - processProposalState: Used for ProcessProposal, which is set based on the - // the previous block's state. This state is never committed. In case of - // multiple rounds, the state is always reset to the previous block's state. - // - // - finalizeBlockState: Used for FinalizeBlock, which is set based on the - // previous block's state. This state is committed. - checkState *state - prepareProposalState *state - processProposalState *state - finalizeBlockState *state - - // An inter-block write-through cache provided to the context during the ABCI - // FinalizeBlock call. - interBlockCache storetypes.MultiStorePersistentCache - - // paramStore is used to query for ABCI consensus parameters from an - // application parameter store. - paramStore ParamStore - - // queryGasLimit defines the maximum gas for queries; unbounded if 0. - queryGasLimit uint64 - - // The minimum gas prices a validator is willing to accept for processing a - // transaction. This is mainly used for DoS and spam prevention. 
- minGasPrices sdk.DecCoins
-
- // initialHeight is the initial height at which we start the BaseApp
- initialHeight int64
-
- // flag for sealing options and parameters to a BaseApp
- sealed bool
-
- // block height at which to halt the chain and gracefully shutdown
- haltHeight uint64
-
- // minimum block time (in Unix seconds) at which to halt the chain and gracefully shutdown
- haltTime uint64
-
- // minRetainBlocks defines the minimum block height offset from the current
- // block being committed, such that all blocks past this offset are pruned
- // from CometBFT. It is used as part of the process of determining the
- // ResponseCommit.RetainHeight value during ABCI Commit. A value of 0 indicates
- // that no blocks should be pruned.
- //
- // Note: CometBFT block pruning is dependent on this parameter in conjunction
- // with the unbonding (safety threshold) period, state pruning and state sync
- // snapshot parameters to determine the correct minimum value of
- // ResponseCommit.RetainHeight.
- minRetainBlocks uint64
-
- // application's version string
- version string
-
- // application's protocol version that increments on every upgrade
- // if BaseApp is passed to the upgrade keeper's NewKeeper method.
- appVersion uint64
-
- // recovery handler for app.runTx method
- runTxRecoveryMiddleware recoveryMiddleware
-
- // trace set will return full stack traces for errors in ABCI Log field
- trace bool
-
- // indexEvents defines the set of events in the form {eventType}.{attributeKey},
- // which informs CometBFT what to index. If empty, all events will be indexed.
- indexEvents map[string]struct{}
-
- // streamingManager for managing instances and configuration of ABCIListener services
- streamingManager storetypes.StreamingManager
-
- chainID string
-
- cdc codec.Codec
-
- // optimisticExec contains the context required for Optimistic Execution,
- // including the goroutine handling. This is experimental and must be enabled
- // by developers.
- optimisticExec *oe.OptimisticExecution
-}
-
-// NewBaseApp returns a reference to an initialized BaseApp. It accepts a
-// variadic number of option functions, which act on the BaseApp to set
-// configuration choices.
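NewBaseApp below applies its functional options in order before falling back to defaults. The following is a short sketch of a constructor using the option helpers that appear in the deleted tests above (halt height/time, optimistic execution) plus minimum gas prices and a mempool; the option names are assumed to match baseapp/options.go.

package app

import (
	dbm "github.com/cosmos/cosmos-db"

	"cosmossdk.io/log"

	"github.com/cosmos/cosmos-sdk/baseapp"
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/cosmos-sdk/types/mempool"
)

func newApp(logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder) *baseapp.BaseApp {
	return baseapp.NewBaseApp(
		"my-app", logger, db, txDecoder,
		baseapp.SetChainID("my-chain-1"),
		baseapp.SetMinGasPrices("0.025stake"),     // CheckTx spam/DoS protection
		baseapp.SetHaltHeight(0),                  // 0 disables height-based halting
		baseapp.SetHaltTime(0),                    // 0 disables time-based halting
		baseapp.SetOptimisticExecution(),          // opt-in, experimental
		baseapp.SetMempool(mempool.NoOpMempool{}), // the default; a real app would plug in its own mempool
	)
}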
-func NewBaseApp( - name string, logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder, options ...func(*BaseApp), -) *BaseApp { - app := &BaseApp{ - logger: logger, - name: name, - db: db, - cms: store.NewCommitMultiStore(db, logger, storemetrics.NewNoOpMetrics()), // by default we use a no-op metric gather in store - storeLoader: DefaultStoreLoader, - grpcQueryRouter: NewGRPCQueryRouter(), - msgServiceRouter: NewMsgServiceRouter(), - txDecoder: txDecoder, - fauxMerkleMode: false, - queryGasLimit: math.MaxUint64, - } - - for _, option := range options { - option(app) - } - - if app.mempool == nil { - app.SetMempool(mempool.NoOpMempool{}) - } - - abciProposalHandler := NewDefaultProposalHandler(app.mempool, app) - - if app.prepareProposal == nil { - app.SetPrepareProposal(abciProposalHandler.PrepareProposalHandler()) - } - if app.processProposal == nil { - app.SetProcessProposal(abciProposalHandler.ProcessProposalHandler()) - } - if app.extendVote == nil { - app.SetExtendVoteHandler(NoOpExtendVote()) - } - if app.verifyVoteExt == nil { - app.SetVerifyVoteExtensionHandler(NoOpVerifyVoteExtensionHandler()) - } - if app.interBlockCache != nil { - app.cms.SetInterBlockCache(app.interBlockCache) - } - - app.runTxRecoveryMiddleware = newDefaultRecoveryMiddleware() - - // Initialize with an empty interface registry to avoid nil pointer dereference. - // Unless SetInterfaceRegistry is called with an interface registry with proper address codecs baseapp will panic. - app.cdc = codec.NewProtoCodec(codectypes.NewInterfaceRegistry()) - - return app -} - -// Name returns the name of the BaseApp. -func (app *BaseApp) Name() string { - return app.name -} - -// AppVersion returns the application's protocol version. -func (app *BaseApp) AppVersion() uint64 { - return app.appVersion -} - -// Version returns the application's version string. -func (app *BaseApp) Version() string { - return app.version -} - -// Logger returns the logger of the BaseApp. -func (app *BaseApp) Logger() log.Logger { - return app.logger -} - -// Trace returns the boolean value for logging error stack traces. -func (app *BaseApp) Trace() bool { - return app.trace -} - -// MsgServiceRouter returns the MsgServiceRouter of a BaseApp. -func (app *BaseApp) MsgServiceRouter() *MsgServiceRouter { return app.msgServiceRouter } - -// SetMsgServiceRouter sets the MsgServiceRouter of a BaseApp. -func (app *BaseApp) SetMsgServiceRouter(msgServiceRouter *MsgServiceRouter) { - app.msgServiceRouter = msgServiceRouter -} - -// MountStores mounts all IAVL or DB stores to the provided keys in the BaseApp -// multistore. -func (app *BaseApp) MountStores(keys ...storetypes.StoreKey) { - for _, key := range keys { - switch key.(type) { - case *storetypes.KVStoreKey: - if !app.fauxMerkleMode { - app.MountStore(key, storetypes.StoreTypeIAVL) - } else { - // StoreTypeDB doesn't do anything upon commit, and it doesn't - // retain history, but it's useful for faster simulation. - app.MountStore(key, storetypes.StoreTypeDB) - } - - case *storetypes.TransientStoreKey: - app.MountStore(key, storetypes.StoreTypeTransient) - - case *storetypes.MemoryStoreKey: - app.MountStore(key, storetypes.StoreTypeMemory) - - default: - panic(fmt.Sprintf("Unrecognized store key type :%T", key)) - } - } -} - -// MountKVStores mounts all IAVL or DB stores to the provided keys in the -// BaseApp multistore. 
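The mount helpers here are typically called once from the app constructor and followed by LoadLatestVersion. A minimal sketch with hypothetical store names, assuming the key constructors in cosmossdk.io/store/types:

package app

import (
	storetypes "cosmossdk.io/store/types"

	"github.com/cosmos/cosmos-sdk/baseapp"
)

func mountAndLoad(app *baseapp.BaseApp) error {
	keys := map[string]*storetypes.KVStoreKey{
		"acc":  storetypes.NewKVStoreKey("acc"),
		"bank": storetypes.NewKVStoreKey("bank"),
	}
	tkeys := map[string]*storetypes.TransientStoreKey{
		"transient_params": storetypes.NewTransientStoreKey("transient_params"),
	}

	app.MountKVStores(keys)         // IAVL-backed, committed stores
	app.MountTransientStores(tkeys) // wiped at the end of every block

	// Runs the configured StoreLoader and then Init, which seals the app.
	return app.LoadLatestVersion()
}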
-func (app *BaseApp) MountKVStores(keys map[string]*storetypes.KVStoreKey) { - for _, key := range keys { - if !app.fauxMerkleMode { - app.MountStore(key, storetypes.StoreTypeIAVL) - } else { - // StoreTypeDB doesn't do anything upon commit, and it doesn't - // retain history, but it's useful for faster simulation. - app.MountStore(key, storetypes.StoreTypeDB) - } - } -} - -// MountTransientStores mounts all transient stores to the provided keys in -// the BaseApp multistore. -func (app *BaseApp) MountTransientStores(keys map[string]*storetypes.TransientStoreKey) { - for _, key := range keys { - app.MountStore(key, storetypes.StoreTypeTransient) - } -} - -// MountMemoryStores mounts all in-memory KVStores with the BaseApp's internal -// commit multi-store. -func (app *BaseApp) MountMemoryStores(keys map[string]*storetypes.MemoryStoreKey) { - skeys := maps.Keys(keys) - sort.Strings(skeys) - for _, key := range skeys { - memKey := keys[key] - app.MountStore(memKey, storetypes.StoreTypeMemory) - } -} - -// MountStore mounts a store to the provided key in the BaseApp multistore, -// using the default DB. -func (app *BaseApp) MountStore(key storetypes.StoreKey, typ storetypes.StoreType) { - app.cms.MountStoreWithDB(key, typ, nil) -} - -// LoadLatestVersion loads the latest application version. It will panic if -// called more than once on a running BaseApp. -func (app *BaseApp) LoadLatestVersion() error { - err := app.storeLoader(app.cms) - if err != nil { - return fmt.Errorf("failed to load latest version: %w", err) - } - - return app.Init() -} - -// DefaultStoreLoader will be used by default and loads the latest version -func DefaultStoreLoader(ms storetypes.CommitMultiStore) error { - return ms.LoadLatestVersion() -} - -// CommitMultiStore returns the root multi-store. -// App constructor can use this to access the `cms`. -// UNSAFE: must not be used during the abci life cycle. -func (app *BaseApp) CommitMultiStore() storetypes.CommitMultiStore { - return app.cms -} - -// SnapshotManager returns the snapshot manager. -// application use this to register extra extension snapshotters. -func (app *BaseApp) SnapshotManager() *snapshots.Manager { - return app.snapshotManager -} - -// LoadVersion loads the BaseApp application version. It will panic if called -// more than once on a running baseapp. -func (app *BaseApp) LoadVersion(version int64) error { - app.logger.Info("NOTICE: this could take a long time to migrate IAVL store to fastnode if you enable Fast Node.\n") - err := app.cms.LoadVersion(version) - if err != nil { - return fmt.Errorf("failed to load version %d: %w", version, err) - } - - return app.Init() -} - -// LastCommitID returns the last CommitID of the multistore. -func (app *BaseApp) LastCommitID() storetypes.CommitID { - return app.cms.LastCommitID() -} - -// LastBlockHeight returns the last committed block height. -func (app *BaseApp) LastBlockHeight() int64 { - return app.cms.LastCommitID().Version -} - -// ChainID returns the chainID of the app. -func (app *BaseApp) ChainID() string { - return app.chainID -} - -// AnteHandler returns the AnteHandler of the app. -func (app *BaseApp) AnteHandler() sdk.AnteHandler { - return app.anteHandler -} - -// Mempool returns the Mempool of the app. -func (app *BaseApp) Mempool() mempool.Mempool { - return app.mempool -} - -// Init initializes the app. It seals the app, preventing any -// further modifications. In addition, it validates the app against -// the earlier provided settings. Returns an error if validation fails. 
-// nil otherwise. Panics if the app is already sealed. -func (app *BaseApp) Init() error { - if app.sealed { - panic("cannot call initFromMainStore: baseapp already sealed") - } - - emptyHeader := cmtproto.Header{ChainID: app.chainID} - - // needed for the export command which inits from store but never calls initchain - app.setState(execModeCheck, emptyHeader) - app.Seal() - - if app.cms == nil { - return errors.New("commit multi-store must not be nil") - } - - return app.cms.GetPruning().Validate() -} - -func (app *BaseApp) setMinGasPrices(gasPrices sdk.DecCoins) { - app.minGasPrices = gasPrices -} - -func (app *BaseApp) setHaltHeight(haltHeight uint64) { - app.haltHeight = haltHeight -} - -func (app *BaseApp) setHaltTime(haltTime uint64) { - app.haltTime = haltTime -} - -func (app *BaseApp) setMinRetainBlocks(minRetainBlocks uint64) { - app.minRetainBlocks = minRetainBlocks -} - -func (app *BaseApp) setInterBlockCache(cache storetypes.MultiStorePersistentCache) { - app.interBlockCache = cache -} - -func (app *BaseApp) setTrace(trace bool) { - app.trace = trace -} - -func (app *BaseApp) setIndexEvents(ie []string) { - app.indexEvents = make(map[string]struct{}) - - for _, e := range ie { - app.indexEvents[e] = struct{}{} - } -} - -// Seal seals a BaseApp. It prohibits any further modifications to a BaseApp. -func (app *BaseApp) Seal() { app.sealed = true } - -// IsSealed returns true if the BaseApp is sealed and false otherwise. -func (app *BaseApp) IsSealed() bool { return app.sealed } - -// setState sets the BaseApp's state for the corresponding mode with a branched -// multi-store (i.e. a CacheMultiStore) and a new Context with the same -// multi-store branch, and provided header. -func (app *BaseApp) setState(mode execMode, header cmtproto.Header) { - ms := app.cms.CacheMultiStore() - baseState := &state{ - ms: ms, - ctx: sdk.NewContext(ms, header, false, app.logger).WithStreamingManager(app.streamingManager), - } - - switch mode { - case execModeCheck: - baseState.ctx = baseState.ctx.WithIsCheckTx(true).WithMinGasPrices(app.minGasPrices) - app.checkState = baseState - - case execModePrepareProposal: - app.prepareProposalState = baseState - - case execModeProcessProposal: - app.processProposalState = baseState - - case execModeFinalize: - app.finalizeBlockState = baseState - - default: - panic(fmt.Sprintf("invalid runTxMode for setState: %d", mode)) - } -} - -// SetCircuitBreaker sets the circuit breaker for the BaseApp. -// The circuit breaker is checked on every message execution to verify if a transaction should be executed or not. -func (app *BaseApp) SetCircuitBreaker(cb CircuitBreaker) { - if app.msgServiceRouter == nil { - panic("cannot set circuit breaker with no msg service router set") - } - app.msgServiceRouter.SetCircuit(cb) -} - -// GetConsensusParams returns the current consensus parameters from the BaseApp's -// ParamStore. If the BaseApp has no ParamStore defined, nil is returned. -func (app *BaseApp) GetConsensusParams(ctx sdk.Context) cmtproto.ConsensusParams { - if app.paramStore == nil { - return cmtproto.ConsensusParams{} - } - - cp, err := app.paramStore.Get(ctx) - if err != nil { - panic(fmt.Errorf("consensus key is nil: %w", err)) - } - - return cp -} - -// StoreConsensusParams sets the consensus parameters to the BaseApp's param -// store. -// -// NOTE: We're explicitly not storing the CometBFT app_version in the param store. -// It's stored instead in the x/upgrade store, with its own bump logic. 
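GetConsensusParams and StoreConsensusParams both delegate to the ParamStore interface, which is normally backed by the x/consensus module keeper. The following is a test-only, in-memory sketch; the Get/Has/Set shape over a context.Context and the gogoproto ConsensusParams type are assumptions about the deleted baseapp/params.go.

package app

import (
	"context"
	"errors"

	cmtproto "github.com/cometbft/cometbft/proto/tendermint/types"
)

// memParamStore is a hypothetical in-memory ParamStore, suitable only for tests.
type memParamStore struct {
	cp *cmtproto.ConsensusParams
}

func (s *memParamStore) Get(_ context.Context) (cmtproto.ConsensusParams, error) {
	if s.cp == nil {
		return cmtproto.ConsensusParams{}, errors.New("consensus params not set")
	}
	return *s.cp, nil
}

func (s *memParamStore) Has(_ context.Context) (bool, error) { return s.cp != nil, nil }

func (s *memParamStore) Set(_ context.Context, cp cmtproto.ConsensusParams) error {
	s.cp = &cp
	return nil
}

// usage (hypothetical): app.SetParamStore(&memParamStore{}) before InitChain persists the genesis params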
-func (app *BaseApp) StoreConsensusParams(ctx sdk.Context, cp cmtproto.ConsensusParams) error { - if app.paramStore == nil { - return errors.New("cannot store consensus params with no params store set") - } - - return app.paramStore.Set(ctx, cp) -} - -// AddRunTxRecoveryHandler adds custom app.runTx method panic handlers. -func (app *BaseApp) AddRunTxRecoveryHandler(handlers ...RecoveryHandler) { - for _, h := range handlers { - app.runTxRecoveryMiddleware = newRecoveryMiddleware(h, app.runTxRecoveryMiddleware) - } -} - -// GetMaximumBlockGas gets the maximum gas from the consensus params. It panics -// if maximum block gas is less than negative one and returns zero if negative -// one. -func (app *BaseApp) GetMaximumBlockGas(ctx sdk.Context) uint64 { - cp := app.GetConsensusParams(ctx) - if cp.Block == nil { - return 0 - } - - maxGas := cp.Block.MaxGas - - switch { - case maxGas < -1: - panic(fmt.Sprintf("invalid maximum block gas: %d", maxGas)) - - case maxGas == -1: - return 0 - - default: - return uint64(maxGas) - } -} - -func (app *BaseApp) validateFinalizeBlockHeight(req *abci.RequestFinalizeBlock) error { - if req.Height < 1 { - return fmt.Errorf("invalid height: %d", req.Height) - } - - lastBlockHeight := app.LastBlockHeight() - - // expectedHeight holds the expected height to validate - var expectedHeight int64 - if lastBlockHeight == 0 && app.initialHeight > 1 { - // In this case, we're validating the first block of the chain, i.e no - // previous commit. The height we're expecting is the initial height. - expectedHeight = app.initialHeight - } else { - // This case can mean two things: - // - // - Either there was already a previous commit in the store, in which - // case we increment the version from there. - // - Or there was no previous commit, in which case we start at version 1. - expectedHeight = lastBlockHeight + 1 - } - - if req.Height != expectedHeight { - return fmt.Errorf("invalid height: %d; expected: %d", req.Height, expectedHeight) - } - - return nil -} - -// validateBasicTxMsgs executes basic validator calls for messages. -func validateBasicTxMsgs(msgs []sdk.Msg) error { - if len(msgs) == 0 { - return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "must contain at least one message") - } - - for _, msg := range msgs { - m, ok := msg.(sdk.HasValidateBasic) - if !ok { - continue - } - - if err := m.ValidateBasic(); err != nil { - return err - } - } - - return nil -} - -func (app *BaseApp) getState(mode execMode) *state { - switch mode { - case execModeFinalize: - return app.finalizeBlockState - - case execModePrepareProposal: - return app.prepareProposalState - - case execModeProcessProposal: - return app.processProposalState - - default: - return app.checkState - } -} - -func (app *BaseApp) getBlockGasMeter(ctx sdk.Context) storetypes.GasMeter { - if maxGas := app.GetMaximumBlockGas(ctx); maxGas > 0 { - return storetypes.NewGasMeter(maxGas) - } - - return storetypes.NewInfiniteGasMeter() -} - -// retrieve the context for the tx w/ txBytes and other memoized values. -func (app *BaseApp) getContextForTx(mode execMode, txBytes []byte) sdk.Context { - modeState := app.getState(mode) - if modeState == nil { - panic(fmt.Sprintf("state is nil for mode %v", mode)) - } - ctx := modeState.ctx. 
- WithTxBytes(txBytes) - // WithVoteInfos(app.voteInfos) // TODO: identify if this is needed - - ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx)) - - if mode == execModeReCheck { - ctx = ctx.WithIsReCheckTx(true) - } - - if mode == execModeSimulate { - ctx, _ = ctx.CacheContext() - } - - return ctx -} - -// cacheTxContext returns a new context based off of the provided context with -// a branched multi-store. -func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context, storetypes.CacheMultiStore) { - ms := ctx.MultiStore() - // TODO: https://github.com/cosmos/cosmos-sdk/issues/2824 - msCache := ms.CacheMultiStore() - if msCache.TracingEnabled() { - msCache = msCache.SetTracingContext( - storetypes.TraceContext( - map[string]interface{}{ - "txHash": fmt.Sprintf("%X", tmhash.Sum(txBytes)), - }, - ), - ).(storetypes.CacheMultiStore) - } - - return ctx.WithMultiStore(msCache), msCache -} - -func (app *BaseApp) preBlock(req *abci.RequestFinalizeBlock) error { - if app.preBlocker != nil { - ctx := app.finalizeBlockState.ctx - rsp, err := app.preBlocker(ctx, req) - if err != nil { - return err - } - // rsp.ConsensusParamsChanged is true from preBlocker means ConsensusParams in store get changed - // write the consensus parameters in store to context - if rsp.ConsensusParamsChanged { - ctx = ctx.WithConsensusParams(app.GetConsensusParams(ctx)) - // GasMeter must be set after we get a context with updated consensus params. - gasMeter := app.getBlockGasMeter(ctx) - ctx = ctx.WithBlockGasMeter(gasMeter) - app.finalizeBlockState.ctx = ctx - } - } - return nil -} - -func (app *BaseApp) beginBlock(req *abci.RequestFinalizeBlock) (sdk.BeginBlock, error) { - var ( - resp sdk.BeginBlock - err error - ) - - if app.beginBlocker != nil { - resp, err = app.beginBlocker(app.finalizeBlockState.ctx) - if err != nil { - return resp, err - } - - // append BeginBlock attributes to all events in the EndBlock response - for i, event := range resp.Events { - resp.Events[i].Attributes = append( - event.Attributes, - abci.EventAttribute{Key: "mode", Value: "BeginBlock"}, - ) - } - - resp.Events = sdk.MarkEventsToIndex(resp.Events, app.indexEvents) - } - - return resp, nil -} - -func (app *BaseApp) deliverTx(tx []byte) *abci.ExecTxResult { - gInfo := sdk.GasInfo{} - resultStr := "successful" - - var resp *abci.ExecTxResult - - defer func() { - telemetry.IncrCounter(1, "tx", "count") - telemetry.IncrCounter(1, "tx", resultStr) - telemetry.SetGauge(float32(gInfo.GasUsed), "tx", "gas", "used") - telemetry.SetGauge(float32(gInfo.GasWanted), "tx", "gas", "wanted") - }() - - gInfo, result, anteEvents, err := app.runTx(execModeFinalize, tx) - if err != nil { - resultStr = "failed" - resp = sdkerrors.ResponseExecTxResultWithEvents( - err, - gInfo.GasWanted, - gInfo.GasUsed, - sdk.MarkEventsToIndex(anteEvents, app.indexEvents), - app.trace, - ) - return resp - } - - resp = &abci.ExecTxResult{ - GasWanted: int64(gInfo.GasWanted), - GasUsed: int64(gInfo.GasUsed), - Log: result.Log, - Data: result.Data, - Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents), - } - - return resp -} - -// endBlock is an application-defined function that is called after transactions -// have been processed in FinalizeBlock. 
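endBlock, like beginBlock above, only runs whatever handler the application registered. The following is a sketch of that registration, assuming the v0.50-style setter names and handler signatures that return sdk.BeginBlock and sdk.EndBlock.

package app

import (
	"github.com/cosmos/cosmos-sdk/baseapp"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

func setBlockers(app *baseapp.BaseApp) {
	app.SetBeginBlocker(func(ctx sdk.Context) (sdk.BeginBlock, error) {
		// Module begin-block logic (inflation, downtime slashing, ...) would run here;
		// returned events get the mode=BeginBlock attribute appended by beginBlock.
		ctx.Logger().Info("begin block", "height", ctx.BlockHeight())
		return sdk.BeginBlock{}, nil
	})

	app.SetEndBlocker(func(ctx sdk.Context) (sdk.EndBlock, error) {
		// Validator-set updates and end-of-block events would be returned here;
		// events get the mode=EndBlock attribute appended by endBlock.
		return sdk.EndBlock{}, nil
	})
}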
-func (app *BaseApp) endBlock(ctx context.Context) (sdk.EndBlock, error) { - var endblock sdk.EndBlock - - if app.endBlocker != nil { - eb, err := app.endBlocker(app.finalizeBlockState.ctx) - if err != nil { - return endblock, err - } - - // append EndBlock attributes to all events in the EndBlock response - for i, event := range eb.Events { - eb.Events[i].Attributes = append( - event.Attributes, - abci.EventAttribute{Key: "mode", Value: "EndBlock"}, - ) - } - - eb.Events = sdk.MarkEventsToIndex(eb.Events, app.indexEvents) - endblock = eb - } - - return endblock, nil -} - -// runTx processes a transaction within a given execution mode, encoded transaction -// bytes, and the decoded transaction itself. All state transitions occur through -// a cached Context depending on the mode provided. State only gets persisted -// if all messages get executed successfully and the execution mode is DeliverTx. -// Note, gas execution info is always returned. A reference to a Result is -// returned if the tx does not run out of gas and if all the messages are valid -// and execute successfully. An error is returned otherwise. -func (app *BaseApp) runTx(mode execMode, txBytes []byte) (gInfo sdk.GasInfo, result *sdk.Result, anteEvents []abci.Event, err error) { - // NOTE: GasWanted should be returned by the AnteHandler. GasUsed is - // determined by the GasMeter. We need access to the context to get the gas - // meter, so we initialize upfront. - var gasWanted uint64 - - ctx := app.getContextForTx(mode, txBytes) - ms := ctx.MultiStore() - - // only run the tx if there is block gas remaining - if mode == execModeFinalize && ctx.BlockGasMeter().IsOutOfGas() { - return gInfo, nil, nil, errorsmod.Wrap(sdkerrors.ErrOutOfGas, "no block gas left to run tx") - } - - defer func() { - if r := recover(); r != nil { - recoveryMW := newOutOfGasRecoveryMiddleware(gasWanted, ctx, app.runTxRecoveryMiddleware) - err, result = processRecovery(r, recoveryMW), nil - ctx.Logger().Error("panic recovered in runTx", "err", err) - } - - gInfo = sdk.GasInfo{GasWanted: gasWanted, GasUsed: ctx.GasMeter().GasConsumed()} - }() - - blockGasConsumed := false - - // consumeBlockGas makes sure block gas is consumed at most once. It must - // happen after tx processing, and must be executed even if tx processing - // fails. Hence, it's execution is deferred. - consumeBlockGas := func() { - if !blockGasConsumed { - blockGasConsumed = true - ctx.BlockGasMeter().ConsumeGas( - ctx.GasMeter().GasConsumedToLimit(), "block gas meter", - ) - } - } - - // If BlockGasMeter() panics it will be caught by the above recover and will - // return an error - in any case BlockGasMeter will consume gas past the limit. - // - // NOTE: consumeBlockGas must exist in a separate defer function from the - // general deferred recovery function to recover from consumeBlockGas as it'll - // be executed first (deferred statements are executed as stack). 
- if mode == execModeFinalize { - defer consumeBlockGas() - } - - tx, err := app.txDecoder(txBytes) - if err != nil { - return sdk.GasInfo{}, nil, nil, err - } - - msgs := tx.GetMsgs() - if err := validateBasicTxMsgs(msgs); err != nil { - return sdk.GasInfo{}, nil, nil, err - } - - for _, msg := range msgs { - handler := app.msgServiceRouter.Handler(msg) - if handler == nil { - return sdk.GasInfo{}, nil, nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg) - } - } - - if app.anteHandler != nil { - var ( - anteCtx sdk.Context - msCache storetypes.CacheMultiStore - ) - - // Branch context before AnteHandler call in case it aborts. - // This is required for both CheckTx and DeliverTx. - // Ref: https://github.com/cosmos/cosmos-sdk/issues/2772 - // - // NOTE: Alternatively, we could require that AnteHandler ensures that - // writes do not happen if aborted/failed. This may have some - // performance benefits, but it'll be more difficult to get right. - anteCtx, msCache = app.cacheTxContext(ctx, txBytes) - anteCtx = anteCtx.WithEventManager(sdk.NewEventManager()) - newCtx, err := app.anteHandler(anteCtx, tx, mode == execModeSimulate) - - if !newCtx.IsZero() { - // At this point, newCtx.MultiStore() is a store branch, or something else - // replaced by the AnteHandler. We want the original multistore. - // - // Also, in the case of the tx aborting, we need to track gas consumed via - // the instantiated gas meter in the AnteHandler, so we update the context - // prior to returning. - ctx = newCtx.WithMultiStore(ms) - } - - events := ctx.EventManager().Events() - - // GasMeter expected to be set in AnteHandler - gasWanted = ctx.GasMeter().Limit() - - if err != nil { - return gInfo, nil, nil, err - } - - msCache.Write() - anteEvents = events.ToABCIEvents() - } - - if mode == execModeCheck { - err = app.mempool.Insert(ctx, tx) - if err != nil { - return gInfo, nil, anteEvents, err - } - } else if mode == execModeFinalize { - err = app.mempool.Remove(tx) - if err != nil && !errors.Is(err, mempool.ErrTxNotFound) { - return gInfo, nil, anteEvents, - fmt.Errorf("failed to remove tx from mempool: %w", err) - } - } - - // Create a new Context based off of the existing Context with a MultiStore branch - // in case message processing fails. At this point, the MultiStore - // is a branch of a branch. - runMsgCtx, msCache := app.cacheTxContext(ctx, txBytes) - - // Attempt to execute all messages and only update state if all messages pass - // and we're in DeliverTx. Note, runMsgs will never return a reference to a - // Result if any single message fails or does not have a registered Handler. - msgsV2, err := tx.GetMsgsV2() - if err == nil { - result, err = app.runMsgs(runMsgCtx, msgs, msgsV2, mode) - } - if err == nil { - // Run optional postHandlers. - // - // Note: If the postHandler fails, we also revert the runMsgs state. - if app.postHandler != nil { - // The runMsgCtx context currently contains events emitted by the ante handler. - // We clear this to correctly order events without duplicates. - // Note that the state is still preserved. - postCtx := runMsgCtx.WithEventManager(sdk.NewEventManager()) - - newCtx, err := app.postHandler(postCtx, tx, mode == execModeSimulate, err == nil) - if err != nil { - return gInfo, nil, anteEvents, err - } - - result.Events = append(result.Events, newCtx.EventManager().ABCIEvents()...) - } - - if mode == execModeFinalize { - // When block gas exceeds, it'll panic and won't commit the cached store. 
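The AnteHandler and runMsgs code paths here follow the same branch-then-write discipline; stripped of the surrounding machinery it is simply (doStateTransition is a placeholder for the ante handler or message execution):

    msCache := ctx.MultiStore().CacheMultiStore() // branch: writes are buffered, the parent is untouched
    branchedCtx := ctx.WithMultiStore(msCache)

    if err := doStateTransition(branchedCtx); err != nil {
        return err // drop the branch; nothing reaches the parent multi-store
    }
    msCache.Write() // success: flush the buffered writes into the parent multi-store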
- consumeBlockGas() - - msCache.Write() - } - - if len(anteEvents) > 0 && (mode == execModeFinalize || mode == execModeSimulate) { - // append the events in the order of occurrence - result.Events = append(anteEvents, result.Events...) - } - } - - return gInfo, result, anteEvents, err -} - -// runMsgs iterates through a list of messages and executes them with the provided -// Context and execution mode. Messages will only be executed during simulation -// and DeliverTx. An error is returned if any single message fails or if a -// Handler does not exist for a given message route. Otherwise, a reference to a -// Result is returned. The caller must not commit state if an error is returned. -func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, msgsV2 []protov2.Message, mode execMode) (*sdk.Result, error) { - events := sdk.EmptyEvents() - var msgResponses []*codectypes.Any - - // NOTE: GasWanted is determined by the AnteHandler and GasUsed by the GasMeter. - for i, msg := range msgs { - if mode != execModeFinalize && mode != execModeSimulate { - break - } - - handler := app.msgServiceRouter.Handler(msg) - if handler == nil { - return nil, errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "no message handler found for %T", msg) - } - - // ADR 031 request type routing - msgResult, err := handler(ctx, msg) - if err != nil { - return nil, errorsmod.Wrapf(err, "failed to execute message; message index: %d", i) - } - - // create message events - msgEvents, err := createEvents(app.cdc, msgResult.GetEvents(), msg, msgsV2[i]) - if err != nil { - return nil, errorsmod.Wrapf(err, "failed to create message events; message index: %d", i) - } - - // append message events and data - // - // Note: Each message result's data must be length-prefixed in order to - // separate each result. - for j, event := range msgEvents { - // append message index to all events - msgEvents[j] = event.AppendAttributes(sdk.NewAttribute("msg_index", strconv.Itoa(i))) - } - - events = events.AppendEvents(msgEvents) - - // Each individual sdk.Result that went through the MsgServiceRouter - // (which should represent 99% of the Msgs now, since everyone should - // be using protobuf Msgs) has exactly one Msg response, set inside - // `WrapServiceResult`. We take that Msg response, and aggregate it - // into an array. - if len(msgResult.MsgResponses) > 0 { - msgResponse := msgResult.MsgResponses[0] - if msgResponse == nil { - return nil, sdkerrors.ErrLogic.Wrapf("got nil Msg response at index %d for msg %s", i, sdk.MsgTypeURL(msg)) - } - msgResponses = append(msgResponses, msgResponse) - } - - } - - data, err := makeABCIData(msgResponses) - if err != nil { - return nil, errorsmod.Wrap(err, "failed to marshal tx data") - } - - return &sdk.Result{ - Data: data, - Events: events.ToABCIEvents(), - MsgResponses: msgResponses, - }, nil -} - -// makeABCIData generates the Data field to be sent to ABCI Check/DeliverTx. 
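The aggregated Msg responses end up in the transaction result's Data field via makeABCIData below; reading them back out of an *abci.ExecTxResult looks roughly like this (execTxResult is a placeholder, and the loop body is illustrative only):

    var txMsgData sdk.TxMsgData
    if err := proto.Unmarshal(execTxResult.Data, &txMsgData); err != nil {
        return err
    }
    for i, anyResp := range txMsgData.MsgResponses {
        fmt.Printf("msg %d -> %s\n", i, anyResp.TypeUrl) // one Any-packed response per message
    }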
-func makeABCIData(msgResponses []*codectypes.Any) ([]byte, error) { - return proto.Marshal(&sdk.TxMsgData{MsgResponses: msgResponses}) -} - -func createEvents(cdc codec.Codec, events sdk.Events, msg sdk.Msg, msgV2 protov2.Message) (sdk.Events, error) { - eventMsgName := sdk.MsgTypeURL(msg) - msgEvent := sdk.NewEvent(sdk.EventTypeMessage, sdk.NewAttribute(sdk.AttributeKeyAction, eventMsgName)) - - // we set the signer attribute as the sender - signers, err := cdc.GetMsgV2Signers(msgV2) - if err != nil { - return nil, err - } - if len(signers) > 0 && signers[0] != nil { - addrStr, err := cdc.InterfaceRegistry().SigningContext().AddressCodec().BytesToString(signers[0]) - if err != nil { - return nil, err - } - msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeySender, addrStr)) - } - - // verify that events have no module attribute set - if _, found := events.GetAttributes(sdk.AttributeKeyModule); !found { - if moduleName := sdk.GetModuleNameFromTypeURL(eventMsgName); moduleName != "" { - msgEvent = msgEvent.AppendAttributes(sdk.NewAttribute(sdk.AttributeKeyModule, moduleName)) - } - } - - return sdk.Events{msgEvent}.AppendEvents(events), nil -} - -// PrepareProposalVerifyTx performs transaction verification when a proposer is -// creating a block proposal during PrepareProposal. Any state committed to the -// PrepareProposal state internally will be discarded. will be -// returned if the transaction cannot be encoded. will be returned if -// the transaction is valid, otherwise will be returned. -func (app *BaseApp) PrepareProposalVerifyTx(tx sdk.Tx) ([]byte, error) { - bz, err := app.txEncoder(tx) - if err != nil { - return nil, err - } - - _, _, _, err = app.runTx(execModePrepareProposal, bz) - if err != nil { - return nil, err - } - - return bz, nil -} - -// ProcessProposalVerifyTx performs transaction verification when receiving a -// block proposal during ProcessProposal. Any state committed to the -// ProcessProposal state internally will be discarded. will be -// returned if the transaction cannot be decoded. will be returned if -// the transaction is valid, otherwise will be returned. -func (app *BaseApp) ProcessProposalVerifyTx(txBz []byte) (sdk.Tx, error) { - tx, err := app.txDecoder(txBz) - if err != nil { - return nil, err - } - - _, _, _, err = app.runTx(execModeProcessProposal, txBz) - if err != nil { - return nil, err - } - - return tx, nil -} - -func (app *BaseApp) TxDecode(txBytes []byte) (sdk.Tx, error) { - return app.txDecoder(txBytes) -} - -func (app *BaseApp) TxEncode(tx sdk.Tx) ([]byte, error) { - return app.txEncoder(tx) -} - -// Close is called in start cmd to gracefully cleanup resources. -func (app *BaseApp) Close() error { - var errs []error - - // Close app.db (opened by cosmos-sdk/server/start.go call to openDB) - if app.db != nil { - app.logger.Info("Closing application.db") - if err := app.db.Close(); err != nil { - errs = append(errs, err) - } - } - - // Close app.snapshotManager - // - opened when app chains use cosmos-sdk/server/util.go/DefaultBaseappOptions (boilerplate) - // - which calls cosmos-sdk/server/util.go/GetSnapshotStore - // - which is passed to baseapp/options.go/SetSnapshot - // - to set app.snapshotManager = snapshots.NewManager - if app.snapshotManager != nil { - app.logger.Info("Closing snapshots/metadata.db") - if err := app.snapshotManager.Close(); err != nil { - errs = append(errs, err) - } - } - - return errors.Join(errs...) 
-} diff --git a/baseapp/baseapp_test.go b/baseapp/baseapp_test.go deleted file mode 100644 index d8371ead0d..0000000000 --- a/baseapp/baseapp_test.go +++ /dev/null @@ -1,766 +0,0 @@ -package baseapp_test - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - abci "github.com/cometbft/cometbft/abci/types" - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/log" - "cosmossdk.io/store/metrics" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/rootmulti" - "cosmossdk.io/store/snapshots" - snapshottypes "cosmossdk.io/store/snapshots/types" - storetypes "cosmossdk.io/store/types" - - "github.com/cosmos/cosmos-sdk/baseapp" - baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/codec" - codectestutil "github.com/cosmos/cosmos-sdk/codec/testutil" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" - "github.com/cosmos/cosmos-sdk/testutil" - "github.com/cosmos/cosmos-sdk/testutil/testdata" - sdk "github.com/cosmos/cosmos-sdk/types" - authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" -) - -var ( - capKey1 = storetypes.NewKVStoreKey("key1") - capKey2 = storetypes.NewKVStoreKey("key2") - - // testTxPriority is the CheckTx priority that we set in the test - // AnteHandler. - testTxPriority = int64(42) -) - -type ( - BaseAppSuite struct { - baseApp *baseapp.BaseApp - cdc *codec.ProtoCodec - txConfig client.TxConfig - } - - SnapshotsConfig struct { - blocks uint64 - blockTxs int - snapshotInterval uint64 - snapshotKeepRecent uint32 - pruningOpts pruningtypes.PruningOptions - } -) - -func NewBaseAppSuite(t *testing.T, opts ...func(*baseapp.BaseApp)) *BaseAppSuite { - cdc := codectestutil.CodecOptions{}.NewCodec() - baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) - - txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) - db := dbm.NewMemDB() - - app := baseapp.NewBaseApp(t.Name(), log.NewTestLogger(t), db, txConfig.TxDecoder(), opts...) 
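PrepareProposalVerifyTx and ProcessProposalVerifyTx above return the encoded/decoded transaction together with a nil error when verification against the proposal state succeeds, and a nil value plus the error otherwise. A hedged sketch of how a custom PrepareProposal handler might lean on the first of them (mempoolTxs and the surrounding wiring are hypothetical):

    prepareProposal := func(ctx sdk.Context, req *abci.RequestPrepareProposal) (*abci.ResponsePrepareProposal, error) {
        var txs [][]byte
        for _, memTx := range mempoolTxs { // hypothetical []sdk.Tx drawn from the app-side mempool
            bz, err := app.PrepareProposalVerifyTx(memTx)
            if err != nil {
                continue // fails under the PrepareProposal state: leave it out of the block
            }
            txs = append(txs, bz)
        }
        return &abci.ResponsePrepareProposal{Txs: txs}, nil
    }
    app.SetPrepareProposal(prepareProposal)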
- require.Equal(t, t.Name(), app.Name()) - - app.SetInterfaceRegistry(cdc.InterfaceRegistry()) - app.MsgServiceRouter().SetInterfaceRegistry(cdc.InterfaceRegistry()) - app.MountStores(capKey1, capKey2) - app.SetParamStore(paramStore{db: dbm.NewMemDB()}) - app.SetTxDecoder(txConfig.TxDecoder()) - app.SetTxEncoder(txConfig.TxEncoder()) - - // mount stores and seal - require.Nil(t, app.LoadLatestVersion()) - - return &BaseAppSuite{ - baseApp: app, - cdc: cdc, - txConfig: txConfig, - } -} - -func getQueryBaseapp(t *testing.T) *baseapp.BaseApp { - t.Helper() - - db := dbm.NewMemDB() - name := t.Name() - app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) - - _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) - require.NoError(t, err) - _, err = app.Commit() - require.NoError(t, err) - - _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2}) - require.NoError(t, err) - _, err = app.Commit() - require.NoError(t, err) - - return app -} - -func NewBaseAppSuiteWithSnapshots(t *testing.T, cfg SnapshotsConfig, opts ...func(*baseapp.BaseApp)) *BaseAppSuite { - snapshotTimeout := 1 * time.Minute - snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), testutil.GetTempDir(t)) - require.NoError(t, err) - - suite := NewBaseAppSuite( - t, - append( - opts, - baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(cfg.snapshotInterval, cfg.snapshotKeepRecent)), - baseapp.SetPruning(cfg.pruningOpts), - )..., - ) - - baseapptestutil.RegisterKeyValueServer(suite.baseApp.MsgServiceRouter(), MsgKeyValueImpl{}) - - _, err = suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - r := rand.New(rand.NewSource(3920758213583)) - keyCounter := 0 - - for height := int64(1); height <= int64(cfg.blocks); height++ { - - _, _, addr := testdata.KeyTestPubAddr() - txs := [][]byte{} - for txNum := 0; txNum < cfg.blockTxs; txNum++ { - msgs := []sdk.Msg{} - for msgNum := 0; msgNum < 100; msgNum++ { - key := []byte(fmt.Sprintf("%v", keyCounter)) - value := make([]byte, 10000) - - _, err := r.Read(value) - require.NoError(t, err) - - msgs = append(msgs, &baseapptestutil.MsgKeyValue{Key: key, Value: value, Signer: addr.String()}) - keyCounter++ - } - - builder := suite.txConfig.NewTxBuilder() - builder.SetMsgs(msgs...) 
- setTxSignature(t, builder, 0) - - txBytes, err := suite.txConfig.TxEncoder()(builder.GetTx()) - require.NoError(t, err) - - txs = append(txs, txBytes) - } - - _, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{ - Height: height, - Txs: txs, - }) - require.NoError(t, err) - - _, err = suite.baseApp.Commit() - require.NoError(t, err) - - // wait for snapshot to be taken, since it happens asynchronously - if cfg.snapshotInterval > 0 && uint64(height)%cfg.snapshotInterval == 0 { - start := time.Now() - for { - if time.Since(start) > snapshotTimeout { - t.Errorf("timed out waiting for snapshot after %v", snapshotTimeout) - } - - snapshot, err := snapshotStore.Get(uint64(height), snapshottypes.CurrentFormat) - require.NoError(t, err) - - if snapshot != nil { - break - } - - time.Sleep(100 * time.Millisecond) - } - } - } - - return suite -} - -func TestLoadVersion(t *testing.T) { - logger := log.NewTestLogger(t) - pruningOpt := baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - db := dbm.NewMemDB() - name := t.Name() - app := baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) - - // make a cap key and mount the store - err := app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - - emptyCommitID := storetypes.CommitID{} - - // fresh store has zero/empty last commit - lastHeight := app.LastBlockHeight() - lastID := app.LastCommitID() - require.Equal(t, int64(0), lastHeight) - require.Equal(t, emptyCommitID, lastID) - - // execute a block, collect commit ID - res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) - require.NoError(t, err) - commitID1 := storetypes.CommitID{Version: 1, Hash: res.AppHash} - _, err = app.Commit() - require.NoError(t, err) - - // execute a block, collect commit ID - res, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2}) - require.NoError(t, err) - commitID2 := storetypes.CommitID{Version: 2, Hash: res.AppHash} - _, err = app.Commit() - require.NoError(t, err) - - // reload with LoadLatestVersion - app = baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) - app.MountStores() - - err = app.LoadLatestVersion() - require.Nil(t, err) - - testLoadVersionHelper(t, app, int64(2), commitID2) - - // Reload with LoadVersion, see if you can commit the same block and get - // the same result. 
- app = baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) - err = app.LoadVersion(1) - require.Nil(t, err) - - testLoadVersionHelper(t, app, int64(1), commitID1) - - _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2}) - require.NoError(t, err) - _, err = app.Commit() - require.NoError(t, err) - - testLoadVersionHelper(t, app, int64(2), commitID2) -} - -func TestSetLoader(t *testing.T) { - useDefaultLoader := func(app *baseapp.BaseApp) { - app.SetStoreLoader(baseapp.DefaultStoreLoader) - } - - initStore := func(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - - key := storetypes.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) - - err := rs.LoadLatestVersion() - require.Nil(t, err) - require.Equal(t, int64(0), rs.LastCommitID().Version) - - // write some data in substore - kv, _ := rs.GetStore(key).(storetypes.KVStore) - require.NotNil(t, kv) - kv.Set(k, v) - - commitID := rs.Commit() - require.Equal(t, int64(1), commitID.Version) - } - - checkStore := func(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger(), metrics.NewNoOpMetrics()) - rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) - - key := storetypes.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) - - err := rs.LoadLatestVersion() - require.Nil(t, err) - require.Equal(t, ver, rs.LastCommitID().Version) - - // query data in substore - kv, _ := rs.GetStore(key).(storetypes.KVStore) - require.NotNil(t, kv) - require.Equal(t, v, kv.Get(k)) - } - - testCases := map[string]struct { - setLoader func(*baseapp.BaseApp) - origStoreKey string - loadStoreKey string - }{ - "don't set loader": { - origStoreKey: "foo", - loadStoreKey: "foo", - }, - "default loader": { - setLoader: useDefaultLoader, - origStoreKey: "foo", - loadStoreKey: "foo", - }, - } - - k := []byte("key") - v := []byte("value") - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - // prepare a db with some data - db := dbm.NewMemDB() - initStore(t, db, tc.origStoreKey, k, v) - - // load the app with the existing db - opts := []func(*baseapp.BaseApp){baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))} - if tc.setLoader != nil { - opts = append(opts, tc.setLoader) - } - app := baseapp.NewBaseApp(t.Name(), log.NewTestLogger(t), db, nil, opts...) 
- app.MountStores(storetypes.NewKVStoreKey(tc.loadStoreKey)) - err := app.LoadLatestVersion() - require.Nil(t, err) - - // "execute" one block - res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2}) - require.NoError(t, err) - require.NotNil(t, res.AppHash) - _, err = app.Commit() - require.NoError(t, err) - - // check db is properly updated - checkStore(t, db, 2, tc.loadStoreKey, k, v) - checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil) - }) - } -} - -func TestVersionSetterGetter(t *testing.T) { - pruningOpt := baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) - db := dbm.NewMemDB() - name := t.Name() - app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil, pruningOpt) - - require.Equal(t, "", app.Version()) - res, err := app.Query(context.TODO(), &abci.RequestQuery{Path: "app/version"}) - require.NoError(t, err) - require.True(t, res.IsOK()) - require.Equal(t, "", string(res.Value)) - - versionString := "1.0.0" - app.SetVersion(versionString) - require.Equal(t, versionString, app.Version()) - - res, err = app.Query(context.TODO(), &abci.RequestQuery{Path: "app/version"}) - require.NoError(t, err) - require.True(t, res.IsOK()) - require.Equal(t, versionString, string(res.Value)) -} - -func TestLoadVersionInvalid(t *testing.T) { - logger := log.NewNopLogger() - pruningOpt := baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - db := dbm.NewMemDB() - name := t.Name() - app := baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) - - err := app.LoadLatestVersion() - require.Nil(t, err) - - // require error when loading an invalid version - err = app.LoadVersion(-1) - require.Error(t, err) - - res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) - require.NoError(t, err) - commitID1 := storetypes.CommitID{Version: 1, Hash: res.AppHash} - _, err = app.Commit() - require.NoError(t, err) - - // create a new app with the stores mounted under the same cap key - app = baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) - - // require we can load the latest version - err = app.LoadVersion(1) - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(1), commitID1) - - // require error when loading an invalid version - err = app.LoadVersion(2) - require.Error(t, err) -} - -func TestOptionFunction(t *testing.T) { - testChangeNameHelper := func(name string) func(*baseapp.BaseApp) { - return func(bap *baseapp.BaseApp) { - bap.SetName(name) - } - } - - db := dbm.NewMemDB() - bap := baseapp.NewBaseApp("starting name", log.NewTestLogger(t), db, nil, testChangeNameHelper("new name")) - require.Equal(t, bap.Name(), "new name", "BaseApp should have had name changed via option function") -} - -func TestBaseAppOptionSeal(t *testing.T) { - suite := NewBaseAppSuite(t) - - require.Panics(t, func() { - suite.baseApp.SetName("") - }) - require.Panics(t, func() { - suite.baseApp.SetVersion("") - }) - require.Panics(t, func() { - suite.baseApp.SetDB(nil) - }) - require.Panics(t, func() { - suite.baseApp.SetCMS(nil) - }) - require.Panics(t, func() { - suite.baseApp.SetInitChainer(nil) - }) - require.Panics(t, func() { - suite.baseApp.SetPreBlocker(nil) - }) - require.Panics(t, func() { - suite.baseApp.SetBeginBlocker(nil) - }) - require.Panics(t, func() { - suite.baseApp.SetEndBlocker(nil) - }) - require.Panics(t, func() { - suite.baseApp.SetPrepareCheckStater(nil) - }) - require.Panics(t, func() { - suite.baseApp.SetPrecommiter(nil) - }) - require.Panics(t, func() { - suite.baseApp.SetAnteHandler(nil) - }) - 
require.Panics(t, func() { - suite.baseApp.SetAddrPeerFilter(nil) - }) - require.Panics(t, func() { - suite.baseApp.SetIDPeerFilter(nil) - }) - require.Panics(t, func() { - suite.baseApp.SetFauxMerkleMode() - }) -} - -func TestTxDecoder(t *testing.T) { - cdc := codec.NewProtoCodec(codectypes.NewInterfaceRegistry()) - baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) - - // patch in TxConfig instead of using an output from x/auth/tx - txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) - - tx := newTxCounter(t, txConfig, 1, 0) - txBytes, err := txConfig.TxEncoder()(tx) - require.NoError(t, err) - - dTx, err := txConfig.TxDecoder()(txBytes) - require.NoError(t, err) - - counter, _ := parseTxMemo(t, tx) - dTxCounter, _ := parseTxMemo(t, dTx) - require.Equal(t, counter, dTxCounter) -} - -func TestCustomRunTxPanicHandler(t *testing.T) { - customPanicMsg := "test panic" - anteErr := errorsmod.Register("fakeModule", 100500, "fakeError") - anteOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - panic(errorsmod.Wrap(anteErr, "anteHandler")) - }) - } - - suite := NewBaseAppSuite(t, anteOpt) - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), NoopCounterServerImpl{}) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - suite.baseApp.AddRunTxRecoveryHandler(func(recoveryObj interface{}) error { - err, ok := recoveryObj.(error) - if !ok { - return nil - } - - if anteErr.Is(err) { - panic(customPanicMsg) - } else { - return nil - } - }) - - // transaction should panic with custom handler above - { - tx := newTxCounter(t, suite.txConfig, 0, 0) - - require.PanicsWithValue(t, customPanicMsg, func() { - bz, err := suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{bz}}) - }) - } -} - -func TestBaseAppAnteHandler(t *testing.T) { - anteKey := []byte("ante-key") - anteOpt := func(bapp *baseapp.BaseApp) { - bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) - } - suite := NewBaseAppSuite(t, anteOpt) - - deliverKey := []byte("deliver-key") - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey}) - - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &cmtproto.ConsensusParams{}, - }) - require.NoError(t, err) - - // execute a tx that will fail ante handler execution - // - // NOTE: State should not be mutated here. This will be implicitly checked by - // the next txs ante handler execution (anteHandlerTxTest). 
- tx := newTxCounter(t, suite.txConfig, 0, 0) - tx = setFailOnAnte(t, suite.txConfig, tx, true) - - txBytes, err := suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - - res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) - require.NoError(t, err) - require.Empty(t, res.Events) - require.False(t, res.TxResults[0].IsOK(), fmt.Sprintf("%v", res)) - - ctx := getFinalizeBlockStateCtx(suite.baseApp) - store := ctx.KVStore(capKey1) - require.Equal(t, int64(0), getIntFromStore(t, store, anteKey)) - - // execute at tx that will pass the ante handler (the checkTx state should - // mutate) but will fail the message handler - tx = newTxCounter(t, suite.txConfig, 0, 0) - tx = setFailOnHandler(suite.txConfig, tx, true) - - txBytes, err = suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - - res, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) - require.NoError(t, err) - require.Empty(t, res.Events) - require.False(t, res.TxResults[0].IsOK(), fmt.Sprintf("%v", res)) - - ctx = getFinalizeBlockStateCtx(suite.baseApp) - store = ctx.KVStore(capKey1) - require.Equal(t, int64(1), getIntFromStore(t, store, anteKey)) - require.Equal(t, int64(0), getIntFromStore(t, store, deliverKey)) - - // Execute a successful ante handler and message execution where state is - // implicitly checked by previous tx executions. - tx = newTxCounter(t, suite.txConfig, 1, 0) - - txBytes, err = suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - - res, err = suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) - require.NoError(t, err) - require.NotEmpty(t, res.TxResults[0].Events) - require.True(t, res.TxResults[0].IsOK(), fmt.Sprintf("%v", res)) - - ctx = getFinalizeBlockStateCtx(suite.baseApp) - store = ctx.KVStore(capKey1) - require.Equal(t, int64(2), getIntFromStore(t, store, anteKey)) - require.Equal(t, int64(1), getIntFromStore(t, store, deliverKey)) - - suite.baseApp.Commit() -} - -// Test and ensure that invalid block heights always cause errors. 
-// See issues: -// - https://github.com/cosmos/cosmos-sdk/issues/11220 -// - https://github.com/cosmos/cosmos-sdk/issues/7662 -func TestABCI_CreateQueryContext(t *testing.T) { - t.Parallel() - - db := dbm.NewMemDB() - name := t.Name() - app := baseapp.NewBaseApp(name, log.NewTestLogger(t), db, nil) - - _, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) - require.NoError(t, err) - _, err = app.Commit() - require.NoError(t, err) - - _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 2}) - require.NoError(t, err) - _, err = app.Commit() - require.NoError(t, err) - - testCases := []struct { - name string - height int64 - prove bool - expErr bool - }{ - {"valid height", 2, true, false}, - {"future height", 10, true, true}, - {"negative height, prove=true", -1, true, true}, - {"negative height, prove=false", -1, false, true}, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - _, err := app.CreateQueryContext(tc.height, tc.prove) - if tc.expErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - } -} - -func TestSetMinGasPrices(t *testing.T) { - minGasPrices := sdk.DecCoins{sdk.NewInt64DecCoin("stake", 5000)} - suite := NewBaseAppSuite(t, baseapp.SetMinGasPrices(minGasPrices.String())) - - ctx := getCheckStateCtx(suite.baseApp) - require.Equal(t, minGasPrices, ctx.MinGasPrices()) -} - -type ctxType string - -const ( - QueryCtx ctxType = "query" - CheckTxCtx ctxType = "checkTx" -) - -var ctxTypes = []ctxType{QueryCtx, CheckTxCtx} - -func (c ctxType) GetCtx(t *testing.T, bapp *baseapp.BaseApp) sdk.Context { - t.Helper() - if c == QueryCtx { - ctx, err := bapp.CreateQueryContext(1, false) - require.NoError(t, err) - return ctx - } else if c == CheckTxCtx { - return getCheckStateCtx(bapp) - } - // TODO: Not supported yet - return getFinalizeBlockStateCtx(bapp) -} - -func TestQueryGasLimit(t *testing.T) { - testCases := []struct { - queryGasLimit uint64 - gasActuallyUsed uint64 - shouldQueryErr bool - }{ - {queryGasLimit: 100, gasActuallyUsed: 50, shouldQueryErr: false}, // Valid case - {queryGasLimit: 100, gasActuallyUsed: 150, shouldQueryErr: true}, // gasActuallyUsed > queryGasLimit - {queryGasLimit: 0, gasActuallyUsed: 50, shouldQueryErr: false}, // fuzzing with queryGasLimit = 0 - {queryGasLimit: 0, gasActuallyUsed: 0, shouldQueryErr: false}, // both queryGasLimit and gasActuallyUsed are 0 - {queryGasLimit: 200, gasActuallyUsed: 200, shouldQueryErr: false}, // gasActuallyUsed == queryGasLimit - {queryGasLimit: 100, gasActuallyUsed: 1000, shouldQueryErr: true}, // gasActuallyUsed > queryGasLimit - } - - for _, tc := range testCases { - for _, ctxType := range ctxTypes { - t.Run(fmt.Sprintf("%s: %d - %d", ctxType, tc.queryGasLimit, tc.gasActuallyUsed), func(t *testing.T) { - app := getQueryBaseapp(t) - baseapp.SetQueryGasLimit(tc.queryGasLimit)(app) - ctx := ctxType.GetCtx(t, app) - - // query gas limit should have no effect when CtxType != QueryCtx - if tc.shouldQueryErr && ctxType == QueryCtx { - require.Panics(t, func() { ctx.GasMeter().ConsumeGas(tc.gasActuallyUsed, "test") }) - } else { - require.NotPanics(t, func() { ctx.GasMeter().ConsumeGas(tc.gasActuallyUsed, "test") }) - } - }) - } - } -} - -func TestGetMaximumBlockGas(t *testing.T) { - suite := NewBaseAppSuite(t) - _, err := suite.baseApp.InitChain(&abci.RequestInitChain{}) - require.NoError(t, err) - - ctx := suite.baseApp.NewContext(true) - - suite.baseApp.StoreConsensusParams(ctx, cmtproto.ConsensusParams{Block: &cmtproto.BlockParams{MaxGas: 0}}) - 
require.Equal(t, uint64(0), suite.baseApp.GetMaximumBlockGas(ctx)) - - suite.baseApp.StoreConsensusParams(ctx, cmtproto.ConsensusParams{Block: &cmtproto.BlockParams{MaxGas: -1}}) - require.Equal(t, uint64(0), suite.baseApp.GetMaximumBlockGas(ctx)) - - suite.baseApp.StoreConsensusParams(ctx, cmtproto.ConsensusParams{Block: &cmtproto.BlockParams{MaxGas: 5000000}}) - require.Equal(t, uint64(5000000), suite.baseApp.GetMaximumBlockGas(ctx)) - - suite.baseApp.StoreConsensusParams(ctx, cmtproto.ConsensusParams{Block: &cmtproto.BlockParams{MaxGas: -5000000}}) - require.Panics(t, func() { suite.baseApp.GetMaximumBlockGas(ctx) }) -} - -func TestLoadVersionPruning(t *testing.T) { - logger := log.NewNopLogger() - pruningOptions := pruningtypes.NewCustomPruningOptions(10, 15) - pruningOpt := baseapp.SetPruning(pruningOptions) - db := dbm.NewMemDB() - name := t.Name() - app := baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) - - // make a cap key and mount the store - capKey := storetypes.NewKVStoreKey("key1") - app.MountStores(capKey) - - err := app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - - emptyCommitID := storetypes.CommitID{} - - // fresh store has zero/empty last commit - lastHeight := app.LastBlockHeight() - lastID := app.LastCommitID() - require.Equal(t, int64(0), lastHeight) - require.Equal(t, emptyCommitID, lastID) - - var lastCommitID storetypes.CommitID - - // Commit seven blocks, of which 7 (latest) is kept in addition to 6, 5 - // (keep recent) and 3 (keep every). - for i := int64(1); i <= 7; i++ { - res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: i}) - require.NoError(t, err) - _, err = app.Commit() - require.NoError(t, err) - lastCommitID = storetypes.CommitID{Version: i, Hash: res.AppHash} - } - - for _, v := range []int64{1, 2, 4} { - _, err = app.CommitMultiStore().CacheMultiStoreWithVersion(v) - require.NoError(t, err) - } - - for _, v := range []int64{3, 5, 6, 7} { - _, err = app.CommitMultiStore().CacheMultiStoreWithVersion(v) - require.NoError(t, err) - } - - // reload with LoadLatestVersion, check it loads last version - app = baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) - app.MountStores(capKey) - - err = app.LoadLatestVersion() - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(7), lastCommitID) -} diff --git a/baseapp/block_gas_test.go b/baseapp/block_gas_test.go deleted file mode 100644 index adc3e706f2..0000000000 --- a/baseapp/block_gas_test.go +++ /dev/null @@ -1,257 +0,0 @@ -package baseapp_test - -import ( - "context" - "math" - "testing" - - abci "github.com/cometbft/cometbft/abci/types" - cmtjson "github.com/cometbft/cometbft/libs/json" - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/depinject" - "cosmossdk.io/log" - sdkmath "cosmossdk.io/math" - store "cosmossdk.io/store/types" - - baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/tx" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" - "github.com/cosmos/cosmos-sdk/runtime" - "github.com/cosmos/cosmos-sdk/testutil/configurator" - simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" - "github.com/cosmos/cosmos-sdk/testutil/testdata" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - txtypes "github.com/cosmos/cosmos-sdk/types/tx" - 
"github.com/cosmos/cosmos-sdk/types/tx/signing" - authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" - xauthsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" - bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" -) - -var blockMaxGas = uint64(simtestutil.DefaultConsensusParams.Block.MaxGas) - -type BlockGasImpl struct { - panicTx bool - gasToConsume uint64 - key store.StoreKey -} - -func (m BlockGasImpl) Set(ctx context.Context, msg *baseapptestutil.MsgKeyValue) (*baseapptestutil.MsgCreateKeyValueResponse, error) { - sdkCtx := sdk.UnwrapSDKContext(ctx) - sdkCtx.KVStore(m.key).Set(msg.Key, msg.Value) - sdkCtx.GasMeter().ConsumeGas(m.gasToConsume, "TestMsg") - if m.panicTx { - panic("panic in tx execution") - } - return &baseapptestutil.MsgCreateKeyValueResponse{}, nil -} - -func TestBaseApp_BlockGas(t *testing.T) { - testcases := []struct { - name string - gasToConsume uint64 // gas to consume in the msg execution - panicTx bool // panic explicitly in tx execution - expErr bool - }{ - {"less than block gas meter", 10, false, false}, - {"more than block gas meter", blockMaxGas, false, true}, - {"more than block gas meter", uint64(float64(blockMaxGas) * 1.2), false, true}, - {"consume MaxUint64", math.MaxUint64, true, true}, - {"consume MaxGasWanted", txtypes.MaxGasWanted, false, true}, - {"consume block gas when panicked", 10, true, true}, - } - - for _, tc := range testcases { - var ( - bankKeeper bankkeeper.Keeper - accountKeeper authkeeper.AccountKeeper - appBuilder *runtime.AppBuilder - txConfig client.TxConfig - cdc codec.Codec - interfaceRegistry codectypes.InterfaceRegistry - err error - ) - - err = depinject.Inject( - depinject.Configs( - configurator.NewAppConfig( - configurator.AuthModule(), - configurator.TxModule(), - configurator.ParamsModule(), - configurator.ConsensusModule(), - configurator.BankModule(), - configurator.StakingModule(), - ), - depinject.Supply(log.NewNopLogger()), - ), - &bankKeeper, - &accountKeeper, - &interfaceRegistry, - &txConfig, - &cdc, - &appBuilder) - require.NoError(t, err) - - bapp := appBuilder.Build(dbm.NewMemDB(), nil) - err = bapp.Load(true) - require.NoError(t, err) - - t.Run(tc.name, func(t *testing.T) { - baseapptestutil.RegisterInterfaces(interfaceRegistry) - baseapptestutil.RegisterKeyValueServer(bapp.MsgServiceRouter(), BlockGasImpl{ - panicTx: tc.panicTx, - gasToConsume: tc.gasToConsume, - key: bapp.UnsafeFindStoreKey(banktypes.ModuleName), - }) - - genState := GenesisStateWithSingleValidator(t, cdc, appBuilder) - stateBytes, err := cmtjson.MarshalIndent(genState, "", " ") - require.NoError(t, err) - bapp.InitChain(&abci.RequestInitChain{ - Validators: []abci.ValidatorUpdate{}, - ConsensusParams: simtestutil.DefaultConsensusParams, - AppStateBytes: stateBytes, - }) - - ctx := bapp.NewContext(false) - - // tx fee - feeCoin := sdk.NewCoin("atom", sdkmath.NewInt(150)) - feeAmount := sdk.NewCoins(feeCoin) - - // test account and fund - priv1, _, addr1 := testdata.KeyTestPubAddr() - err = bankKeeper.MintCoins(ctx, minttypes.ModuleName, feeAmount) - require.NoError(t, err) - err = bankKeeper.SendCoinsFromModuleToAccount(ctx, minttypes.ModuleName, addr1, feeAmount) - require.NoError(t, err) - require.Equal(t, feeCoin.Amount, bankKeeper.GetBalance(ctx, addr1, feeCoin.Denom).Amount) - seq := accountKeeper.GetAccount(ctx, addr1).GetSequence() - require.Equal(t, uint64(0), seq) - - // msg and signatures - msg := 
&baseapptestutil.MsgKeyValue{ - Key: []byte("ok"), - Value: []byte("ok"), - Signer: addr1.String(), - } - - txBuilder := txConfig.NewTxBuilder() - - require.NoError(t, txBuilder.SetMsgs(msg)) - txBuilder.SetFeeAmount(feeAmount) - txBuilder.SetGasLimit(uint64(simtestutil.DefaultConsensusParams.Block.MaxGas)) - - senderAccountNumber := accountKeeper.GetAccount(ctx, addr1).GetAccountNumber() - privs, accNums, accSeqs := []cryptotypes.PrivKey{priv1}, []uint64{senderAccountNumber}, []uint64{0} - _, txBytes, err := createTestTx(txConfig, txBuilder, privs, accNums, accSeqs, ctx.ChainID()) - require.NoError(t, err) - - rsp, err := bapp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) - require.NoError(t, err) - - // check result - ctx = bapp.GetContextForFinalizeBlock(txBytes) - okValue := ctx.KVStore(bapp.UnsafeFindStoreKey(banktypes.ModuleName)).Get([]byte("ok")) - - if tc.expErr { - if tc.panicTx { - require.Equal(t, sdkerrors.ErrPanic.ABCICode(), rsp.TxResults[0].Code) - } else { - require.Equal(t, sdkerrors.ErrOutOfGas.ABCICode(), rsp.TxResults[0].Code) - } - require.Empty(t, okValue) - } else { - require.Equal(t, uint32(0), rsp.TxResults[0].Code) - require.Equal(t, []byte("ok"), okValue) - } - // check block gas is always consumed - baseGas := uint64(57504) // baseGas is the gas consumed before tx msg - expGasConsumed := addUint64Saturating(tc.gasToConsume, baseGas) - if expGasConsumed > uint64(simtestutil.DefaultConsensusParams.Block.MaxGas) { - // capped by gasLimit - expGasConsumed = uint64(simtestutil.DefaultConsensusParams.Block.MaxGas) - } - require.Equal(t, int(expGasConsumed), int(ctx.BlockGasMeter().GasConsumed())) - // tx fee is always deducted - require.Equal(t, int64(0), bankKeeper.GetBalance(ctx, addr1, feeCoin.Denom).Amount.Int64()) - // sender's sequence is always increased - seq = accountKeeper.GetAccount(ctx, addr1).GetSequence() - require.NoError(t, err) - require.Equal(t, uint64(1), seq) - }) - } -} - -func createTestTx(txConfig client.TxConfig, txBuilder client.TxBuilder, privs []cryptotypes.PrivKey, accNums, accSeqs []uint64, chainID string) (xauthsigning.Tx, []byte, error) { - defaultSignMode, err := xauthsigning.APISignModeToInternal(txConfig.SignModeHandler().DefaultMode()) - if err != nil { - return nil, nil, err - } - // First round: we gather all the signer infos. We use the "set empty - // signature" hack to do that. - var sigsV2 []signing.SignatureV2 - for i, priv := range privs { - sigV2 := signing.SignatureV2{ - PubKey: priv.PubKey(), - Data: &signing.SingleSignatureData{ - SignMode: defaultSignMode, - Signature: nil, - }, - Sequence: accSeqs[i], - } - - sigsV2 = append(sigsV2, sigV2) - } - err = txBuilder.SetSignatures(sigsV2...) - if err != nil { - return nil, nil, err - } - - // Second round: all signer infos are set, so each signer can sign. - sigsV2 = []signing.SignatureV2{} - for i, priv := range privs { - signerData := xauthsigning.SignerData{ - Address: sdk.AccAddress(priv.PubKey().Bytes()).String(), - ChainID: chainID, - AccountNumber: accNums[i], - Sequence: accSeqs[i], - PubKey: priv.PubKey(), - } - sigV2, err := tx.SignWithPrivKey( - context.TODO(), defaultSignMode, signerData, - txBuilder, priv, txConfig, accSeqs[i]) - if err != nil { - return nil, nil, err - } - - sigsV2 = append(sigsV2, sigV2) - } - err = txBuilder.SetSignatures(sigsV2...) 
- if err != nil { - return nil, nil, err - } - - txBytes, err := txConfig.TxEncoder()(txBuilder.GetTx()) - if err != nil { - return nil, nil, err - } - - return txBuilder.GetTx(), txBytes, nil -} - -func addUint64Saturating(a, b uint64) uint64 { - if math.MaxUint64-a < b { - return math.MaxUint64 - } - - return a + b -} diff --git a/baseapp/circuit.go b/baseapp/circuit.go deleted file mode 100644 index 3db0bc1bdc..0000000000 --- a/baseapp/circuit.go +++ /dev/null @@ -1,8 +0,0 @@ -package baseapp - -import "context" - -// CircuitBreaker is an interface that defines the methods for a circuit breaker. -type CircuitBreaker interface { - IsAllowed(ctx context.Context, typeURL string) (bool, error) -} diff --git a/baseapp/genesis.go b/baseapp/genesis.go deleted file mode 100644 index 4a6b0082b6..0000000000 --- a/baseapp/genesis.go +++ /dev/null @@ -1,23 +0,0 @@ -package baseapp - -import ( - "errors" - - "github.com/cometbft/cometbft/abci/types" - - "cosmossdk.io/core/genesis" -) - -var _ genesis.TxHandler = (*BaseApp)(nil) - -// ExecuteGenesisTx implements genesis.GenesisState from -// cosmossdk.io/core/genesis to set initial state in genesis -func (ba BaseApp) ExecuteGenesisTx(tx []byte) error { - res := ba.deliverTx(tx) - - if res.Code != types.CodeTypeOK { - return errors.New(res.Log) - } - - return nil -} diff --git a/baseapp/grpcrouter.go b/baseapp/grpcrouter.go deleted file mode 100644 index 9955ecb460..0000000000 --- a/baseapp/grpcrouter.go +++ /dev/null @@ -1,159 +0,0 @@ -package baseapp - -import ( - "context" - "fmt" - - abci "github.com/cometbft/cometbft/abci/types" - gogogrpc "github.com/cosmos/gogoproto/grpc" - "google.golang.org/grpc" - "google.golang.org/grpc/encoding" - "google.golang.org/protobuf/runtime/protoiface" - - "github.com/cosmos/cosmos-sdk/baseapp/internal/protocompat" - "github.com/cosmos/cosmos-sdk/client/grpc/reflection" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// GRPCQueryRouter routes ABCI Query requests to GRPC handlers -type GRPCQueryRouter struct { - // routes maps query handlers used in ABCIQuery. - routes map[string]GRPCQueryHandler - // hybridHandlers maps the request name to the handler. It is a hybrid handler which seamlessly - // handles both gogo and protov2 messages. - hybridHandlers map[string][]func(ctx context.Context, req, resp protoiface.MessageV1) error - // binaryCodec is used to encode/decode binary protobuf messages. - binaryCodec codec.BinaryCodec - // cdc is the gRPC codec used by the router to correctly unmarshal messages. - cdc encoding.Codec - // serviceData contains the gRPC services and their handlers. - serviceData []serviceData -} - -// serviceData represents a gRPC service, along with its handler. 
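Putting the router pieces that follow together, registration and routing can be used roughly like this (interfaceRegistry, mypb, myQueryServer, sdkCtx, and reqBz are hypothetical stand-ins; registering the same service twice panics, as noted below):

    qr := baseapp.NewGRPCQueryRouter()
    qr.SetInterfaceRegistry(interfaceRegistry) // also registers the reflection service

    mypb.RegisterQueryServer(qr, myQueryServer{}) // any generated gogo gRPC query service

    // ABCI queries are routed by the fully-qualified "/<service>/<method>" path
    if handler := qr.Route("/myapp.v1.Query/Params"); handler != nil {
        res, err := handler(sdkCtx, &abci.RequestQuery{Data: reqBz})
        _, _ = res, err
    }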
-type serviceData struct { - serviceDesc *grpc.ServiceDesc - handler interface{} -} - -var _ gogogrpc.Server = &GRPCQueryRouter{} - -// NewGRPCQueryRouter creates a new GRPCQueryRouter -func NewGRPCQueryRouter() *GRPCQueryRouter { - return &GRPCQueryRouter{ - routes: map[string]GRPCQueryHandler{}, - hybridHandlers: map[string][]func(ctx context.Context, req, resp protoiface.MessageV1) error{}, - } -} - -// GRPCQueryHandler defines a function type which handles ABCI Query requests -// using gRPC -type GRPCQueryHandler = func(ctx sdk.Context, req *abci.RequestQuery) (*abci.ResponseQuery, error) - -// Route returns the GRPCQueryHandler for a given query route path or nil -// if not found -func (qrt *GRPCQueryRouter) Route(path string) GRPCQueryHandler { - handler, found := qrt.routes[path] - if !found { - return nil - } - return handler -} - -// RegisterService implements the gRPC Server.RegisterService method. sd is a gRPC -// service description, handler is an object which implements that gRPC service/ -// -// This functions PANICS: -// - if a protobuf service is registered twice. -func (qrt *GRPCQueryRouter) RegisterService(sd *grpc.ServiceDesc, handler interface{}) { - // adds a top-level query handler based on the gRPC service name - for _, method := range sd.Methods { - err := qrt.registerABCIQueryHandler(sd, method, handler) - if err != nil { - panic(err) - } - err = qrt.registerHybridHandler(sd, method, handler) - if err != nil { - panic(err) - } - } - - qrt.serviceData = append(qrt.serviceData, serviceData{ - serviceDesc: sd, - handler: handler, - }) -} - -func (qrt *GRPCQueryRouter) registerABCIQueryHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{}) error { - fqName := fmt.Sprintf("/%s/%s", sd.ServiceName, method.MethodName) - methodHandler := method.Handler - - // Check that each service is only registered once. If a service is - // registered more than once, then we should error. Since we can't - // return an error (`Server.RegisterService` interface restriction) we - // panic (at startup). - _, found := qrt.routes[fqName] - if found { - return fmt.Errorf( - "gRPC query service %s has already been registered. Please make sure to only register each service once. 
"+ - "This usually means that there are conflicting modules registering the same gRPC query service", - fqName, - ) - } - - qrt.routes[fqName] = func(ctx sdk.Context, req *abci.RequestQuery) (*abci.ResponseQuery, error) { - // call the method handler from the service description with the handler object, - // a wrapped sdk.Context with proto-unmarshaled data from the ABCI request data - res, err := methodHandler(handler, ctx, func(i interface{}) error { - return qrt.cdc.Unmarshal(req.Data, i) - }, nil) - if err != nil { - return nil, err - } - - // proto marshal the result bytes - var resBytes []byte - resBytes, err = qrt.cdc.Marshal(res) - if err != nil { - return nil, err - } - - // return the result bytes as the response value - return &abci.ResponseQuery{ - Height: req.Height, - Value: resBytes, - }, nil - } - return nil -} - -func (qrt *GRPCQueryRouter) HybridHandlerByRequestName(name string) []func(ctx context.Context, req, resp protoiface.MessageV1) error { - return qrt.hybridHandlers[name] -} - -func (qrt *GRPCQueryRouter) registerHybridHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{}) error { - // extract message name from method descriptor - inputName, err := protocompat.RequestFullNameFromMethodDesc(sd, method) - if err != nil { - return err - } - methodHandler, err := protocompat.MakeHybridHandler(qrt.binaryCodec, sd, method, handler) - if err != nil { - return err - } - qrt.hybridHandlers[string(inputName)] = append(qrt.hybridHandlers[string(inputName)], methodHandler) - return nil -} - -// SetInterfaceRegistry sets the interface registry for the router. This will -// also register the interface reflection gRPC service. -func (qrt *GRPCQueryRouter) SetInterfaceRegistry(interfaceRegistry codectypes.InterfaceRegistry) { - // instantiate the codec - qrt.cdc = codec.NewProtoCodec(interfaceRegistry).GRPCCodec() - qrt.binaryCodec = codec.NewProtoCodec(interfaceRegistry) - // Once we have an interface registry, we can register the interface - // registry reflection gRPC service. - reflection.RegisterReflectionServiceServer(qrt, reflection.NewReflectionServiceServer(interfaceRegistry)) -} diff --git a/baseapp/grpcrouter_helpers.go b/baseapp/grpcrouter_helpers.go deleted file mode 100644 index 9f737e0e06..0000000000 --- a/baseapp/grpcrouter_helpers.go +++ /dev/null @@ -1,64 +0,0 @@ -package baseapp - -import ( - gocontext "context" - "fmt" - - abci "github.com/cometbft/cometbft/abci/types" - gogogrpc "github.com/cosmos/gogoproto/grpc" - "google.golang.org/grpc" - - "github.com/cosmos/cosmos-sdk/codec/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// QueryServiceTestHelper provides a helper for making grpc query service -// rpc calls in unit tests. It implements both the grpc Server and ClientConn -// interfaces needed to register a query service server and create a query -// service client. 
-type QueryServiceTestHelper struct { - *GRPCQueryRouter - Ctx sdk.Context -} - -var ( - _ gogogrpc.Server = &QueryServiceTestHelper{} - _ gogogrpc.ClientConn = &QueryServiceTestHelper{} -) - -// NewQueryServerTestHelper creates a new QueryServiceTestHelper that wraps -// the provided sdk.Context -func NewQueryServerTestHelper(ctx sdk.Context, interfaceRegistry types.InterfaceRegistry) *QueryServiceTestHelper { - qrt := NewGRPCQueryRouter() - qrt.SetInterfaceRegistry(interfaceRegistry) - return &QueryServiceTestHelper{GRPCQueryRouter: qrt, Ctx: ctx} -} - -// Invoke implements the grpc ClientConn.Invoke method -func (q *QueryServiceTestHelper) Invoke(_ gocontext.Context, method string, args, reply interface{}, _ ...grpc.CallOption) error { - querier := q.Route(method) - if querier == nil { - return fmt.Errorf("handler not found for %s", method) - } - reqBz, err := q.cdc.Marshal(args) - if err != nil { - return err - } - - res, err := querier(q.Ctx, &abci.RequestQuery{Data: reqBz}) - if err != nil { - return err - } - - err = q.cdc.Unmarshal(res.Value, reply) - if err != nil { - return err - } - - return nil -} - -// NewStream implements the grpc ClientConn.NewStream method -func (q *QueryServiceTestHelper) NewStream(gocontext.Context, *grpc.StreamDesc, string, ...grpc.CallOption) (grpc.ClientStream, error) { - return nil, fmt.Errorf("not supported") -} diff --git a/baseapp/grpcrouter_test.go b/baseapp/grpcrouter_test.go deleted file mode 100644 index d747fbb747..0000000000 --- a/baseapp/grpcrouter_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package baseapp_test - -import ( - "context" - "sync" - "testing" - - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/depinject" - "cosmossdk.io/log" - - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/codec/types" - "github.com/cosmos/cosmos-sdk/runtime" - "github.com/cosmos/cosmos-sdk/testutil/testdata" - testdata_pulsar "github.com/cosmos/cosmos-sdk/testutil/testdata/testpb" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -func TestGRPCQueryRouter(t *testing.T) { - qr := baseapp.NewGRPCQueryRouter() - interfaceRegistry := testdata.NewTestInterfaceRegistry() - qr.SetInterfaceRegistry(interfaceRegistry) - testdata_pulsar.RegisterQueryServer(qr, testdata_pulsar.QueryImpl{}) - helper := &baseapp.QueryServiceTestHelper{ - GRPCQueryRouter: qr, - Ctx: sdk.Context{}.WithContext(context.Background()), - } - client := testdata.NewQueryClient(helper) - - res, err := client.Echo(context.Background(), &testdata.EchoRequest{Message: "hello"}) - require.Nil(t, err) - require.NotNil(t, res) - require.Equal(t, "hello", res.Message) - - res, err = client.Echo(context.Background(), nil) - require.Nil(t, err) - require.Empty(t, res.Message) - - res2, err := client.SayHello(context.Background(), &testdata.SayHelloRequest{Name: "Foo"}) - require.Nil(t, err) - require.NotNil(t, res) - require.Equal(t, "Hello Foo!", res2.Greeting) - - spot := &testdata.Dog{Name: "Spot", Size_: "big"} - any, err := types.NewAnyWithValue(spot) - require.NoError(t, err) - res3, err := client.TestAny(context.Background(), &testdata.TestAnyRequest{AnyAnimal: any}) - require.NoError(t, err) - require.NotNil(t, res3) - require.Equal(t, spot, res3.HasAnimal.Animal.GetCachedValue()) -} - -func TestGRPCRouterHybridHandlers(t *testing.T) { - assertRouterBehaviour := func(helper *baseapp.QueryServiceTestHelper) { - // test getting the handler by name - handlers := helper.GRPCQueryRouter.HybridHandlerByRequestName("testpb.EchoRequest") - 
require.NotNil(t, handlers) - require.Len(t, handlers, 1) - handler := handlers[0] - // sending a protov2 message should work, and return a protov2 message - v2Resp := new(testdata_pulsar.EchoResponse) - err := handler(helper.Ctx, &testdata_pulsar.EchoRequest{Message: "hello"}, v2Resp) - require.Nil(t, err) - require.Equal(t, "hello", v2Resp.Message) - // also sending a protov1 message should work, and return a gogoproto message - gogoResp := new(testdata.EchoResponse) - err = handler(helper.Ctx, &testdata.EchoRequest{Message: "hello"}, gogoResp) - require.NoError(t, err) - require.Equal(t, "hello", gogoResp.Message) - } - - t.Run("protov2 server", func(t *testing.T) { - qr := baseapp.NewGRPCQueryRouter() - interfaceRegistry := testdata.NewTestInterfaceRegistry() - qr.SetInterfaceRegistry(interfaceRegistry) - testdata_pulsar.RegisterQueryServer(qr, testdata_pulsar.QueryImpl{}) - helper := &baseapp.QueryServiceTestHelper{ - GRPCQueryRouter: qr, - Ctx: sdk.Context{}.WithContext(context.Background()), - } - assertRouterBehaviour(helper) - }) - - t.Run("gogoproto server", func(t *testing.T) { - qr := baseapp.NewGRPCQueryRouter() - interfaceRegistry := testdata.NewTestInterfaceRegistry() - qr.SetInterfaceRegistry(interfaceRegistry) - testdata.RegisterQueryServer(qr, testdata.QueryImpl{}) - helper := &baseapp.QueryServiceTestHelper{ - GRPCQueryRouter: qr, - Ctx: sdk.Context{}.WithContext(context.Background()), - } - assertRouterBehaviour(helper) - }) -} - -func TestRegisterQueryServiceTwice(t *testing.T) { - // Setup baseapp. - var appBuilder *runtime.AppBuilder - err := depinject.Inject( - depinject.Configs( - makeMinimalConfig(), - depinject.Supply(log.NewTestLogger(t)), - ), - &appBuilder) - require.NoError(t, err) - db := dbm.NewMemDB() - app := appBuilder.Build(db, nil) - - // First time registering service shouldn't panic. - require.NotPanics(t, func() { - testdata.RegisterQueryServer( - app.GRPCQueryRouter(), - testdata.QueryImpl{}, - ) - }) - - // Second time should panic. - require.Panics(t, func() { - testdata.RegisterQueryServer( - app.GRPCQueryRouter(), - testdata.QueryImpl{}, - ) - }) -} - -// Tests that we don't have data races per -// https://github.com/cosmos/cosmos-sdk/issues/10324 -// but with the same client connection being used concurrently. -func TestQueryDataRaces_sameConnectionToSameHandler(t *testing.T) { - var mu sync.Mutex - var helper *baseapp.QueryServiceTestHelper - makeClientConn := func(qr *baseapp.GRPCQueryRouter) *baseapp.QueryServiceTestHelper { - mu.Lock() - defer mu.Unlock() - - if helper == nil { - helper = &baseapp.QueryServiceTestHelper{ - GRPCQueryRouter: qr, - Ctx: sdk.Context{}.WithContext(context.Background()), - } - } - return helper - } - testQueryDataRacesSameHandler(t, makeClientConn) -} - -// Tests that we don't have data races per -// https://github.com/cosmos/cosmos-sdk/issues/10324 -// but with unique client connections requesting from the same handler concurrently. -func TestQueryDataRaces_uniqueConnectionsToSameHandler(t *testing.T) { - // Return a new handler for every single call. 
- testQueryDataRacesSameHandler(t, func(qr *baseapp.GRPCQueryRouter) *baseapp.QueryServiceTestHelper { - return &baseapp.QueryServiceTestHelper{ - GRPCQueryRouter: qr, - Ctx: sdk.Context{}.WithContext(context.Background()), - } - }) -} - -func testQueryDataRacesSameHandler(t *testing.T, makeClientConn func(*baseapp.GRPCQueryRouter) *baseapp.QueryServiceTestHelper) { - t.Parallel() - - qr := baseapp.NewGRPCQueryRouter() - interfaceRegistry := testdata.NewTestInterfaceRegistry() - qr.SetInterfaceRegistry(interfaceRegistry) - testdata.RegisterQueryServer(qr, testdata.QueryImpl{}) - - // The goal is to invoke the router concurrently and check for any data races. - // 0. Run with: go test -race - // 1. Synchronize every one of the 1,000 goroutines waiting to all query at the - // same time. - // 2. Once the greenlight is given, perform a query through the router. - var wg sync.WaitGroup - defer wg.Wait() - - greenlight := make(chan bool) - n := 1000 - ready := make(chan bool, n) - go func() { - for i := 0; i < n; i++ { - <-ready - } - close(greenlight) - }() - - for i := 0; i < n; i++ { - wg.Add(1) - go func() { - defer wg.Done() - - // Wait until we get the green light to start. - ready <- true - <-greenlight - - client := testdata.NewQueryClient(makeClientConn(qr)) - res, err := client.Echo(context.Background(), &testdata.EchoRequest{Message: "hello"}) - require.Nil(t, err) - require.NotNil(t, res) - require.Equal(t, "hello", res.Message) - - res, err = client.Echo(context.Background(), nil) - require.Nil(t, err) - require.Empty(t, res.Message) - - res2, err := client.SayHello(context.Background(), &testdata.SayHelloRequest{Name: "Foo"}) - require.Nil(t, err) - require.NotNil(t, res) - require.Equal(t, "Hello Foo!", res2.Greeting) - - spot := &testdata.Dog{Name: "Spot", Size_: "big"} - any, err := types.NewAnyWithValue(spot) - require.NoError(t, err) - res3, err := client.TestAny(context.Background(), &testdata.TestAnyRequest{AnyAnimal: any}) - require.NoError(t, err) - require.NotNil(t, res3) - require.Equal(t, spot, res3.HasAnimal.Animal.GetCachedValue()) - }() - } -} diff --git a/baseapp/grpcserver.go b/baseapp/grpcserver.go deleted file mode 100644 index 20ea6ecac1..0000000000 --- a/baseapp/grpcserver.go +++ /dev/null @@ -1,102 +0,0 @@ -package baseapp - -import ( - "context" - "strconv" - - gogogrpc "github.com/cosmos/gogoproto/grpc" - grpcmiddleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpcrecovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" - - errorsmod "cosmossdk.io/errors" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - grpctypes "github.com/cosmos/cosmos-sdk/types/grpc" -) - -// GRPCQueryRouter returns the GRPCQueryRouter of a BaseApp. -func (app *BaseApp) GRPCQueryRouter() *GRPCQueryRouter { return app.grpcQueryRouter } - -// RegisterGRPCServer registers gRPC services directly with the gRPC server. -func (app *BaseApp) RegisterGRPCServer(server gogogrpc.Server) { - // Define an interceptor for all gRPC queries: this interceptor will create - // a new sdk.Context, and pass it into the query handler. - interceptor := func(grpcCtx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - // If there's some metadata in the context, retrieve it. 
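On the client side, the height handling in the interceptor below is driven entirely by the block-height metadata header (grpctypes.GRPCBlockHeightHeader); pinning a query to a specific height looks roughly like this (queryClient and req are hypothetical):

    md := metadata.Pairs(grpctypes.GRPCBlockHeightHeader, "100") // query state as of height 100
    grpcCtx := metadata.NewOutgoingContext(context.Background(), md)

    res, err := queryClient.Params(grpcCtx, req) // hypothetical generated query client and request
    _, _ = res, err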
- md, ok := metadata.FromIncomingContext(grpcCtx) - if !ok { - return nil, status.Error(codes.Internal, "unable to retrieve metadata") - } - - // Get height header from the request context, if present. - var height int64 - if heightHeaders := md.Get(grpctypes.GRPCBlockHeightHeader); len(heightHeaders) == 1 { - height, err = strconv.ParseInt(heightHeaders[0], 10, 64) - if err != nil { - return nil, errorsmod.Wrapf( - sdkerrors.ErrInvalidRequest, - "Baseapp.RegisterGRPCServer: invalid height header %q: %v", grpctypes.GRPCBlockHeightHeader, err) - } - if err := checkNegativeHeight(height); err != nil { - return nil, err - } - } - - // Create the sdk.Context. Passing false as 2nd arg, as we can't - // actually support proofs with gRPC right now. - sdkCtx, err := app.CreateQueryContext(height, false) - if err != nil { - return nil, err - } - - // Add relevant gRPC headers - if height == 0 { - height = sdkCtx.BlockHeight() // If height was not set in the request, set it to the latest - } - - // Attach the sdk.Context into the gRPC's context.Context. - grpcCtx = context.WithValue(grpcCtx, sdk.SdkContextKey, sdkCtx) - - md = metadata.Pairs(grpctypes.GRPCBlockHeightHeader, strconv.FormatInt(height, 10)) - if err = grpc.SetHeader(grpcCtx, md); err != nil { - app.logger.Error("failed to set gRPC header", "err", err) - } - - return handler(grpcCtx, req) - } - - // Loop through all services and methods, add the interceptor, and register - // the service. - for _, data := range app.GRPCQueryRouter().serviceData { - desc := data.serviceDesc - newMethods := make([]grpc.MethodDesc, len(desc.Methods)) - - for i, method := range desc.Methods { - methodHandler := method.Handler - newMethods[i] = grpc.MethodDesc{ - MethodName: method.MethodName, - Handler: func(srv interface{}, ctx context.Context, dec func(interface{}) error, _ grpc.UnaryServerInterceptor) (interface{}, error) { - return methodHandler(srv, ctx, dec, grpcmiddleware.ChainUnaryServer( - grpcrecovery.UnaryServerInterceptor(), - interceptor, - )) - }, - } - } - - newDesc := &grpc.ServiceDesc{ - ServiceName: desc.ServiceName, - HandlerType: desc.HandlerType, - Methods: newMethods, - Streams: desc.Streams, - Metadata: desc.Metadata, - } - - server.RegisterService(newDesc, data.handler) - } -} diff --git a/baseapp/info.go b/baseapp/info.go deleted file mode 100644 index 8c8a445120..0000000000 --- a/baseapp/info.go +++ /dev/null @@ -1,197 +0,0 @@ -package baseapp - -import ( - "time" - - abci "github.com/cometbft/cometbft/abci/types" - - "cosmossdk.io/core/comet" -) - -var _ comet.BlockInfo = (*cometInfo)(nil) - -// CometInfo defines the properties provided by comet to the application -type cometInfo struct { - Misbehavior []abci.Misbehavior - ValidatorsHash []byte - ProposerAddress []byte - LastCommit abci.CommitInfo -} - -func (r cometInfo) GetEvidence() comet.EvidenceList { - return evidenceWrapper{evidence: r.Misbehavior} -} - -func (r cometInfo) GetValidatorsHash() []byte { - return r.ValidatorsHash -} - -func (r cometInfo) GetProposerAddress() []byte { - return r.ProposerAddress -} - -func (r cometInfo) GetLastCommit() comet.CommitInfo { - return commitInfoWrapper{r.LastCommit} -} - -type evidenceWrapper struct { - evidence []abci.Misbehavior -} - -func (e evidenceWrapper) Len() int { - return len(e.evidence) -} - -func (e evidenceWrapper) Get(i int) comet.Evidence { - return misbehaviorWrapper{e.evidence[i]} -} - -// commitInfoWrapper is a wrapper around abci.CommitInfo that implements CommitInfo interface -type commitInfoWrapper struct { - 
abci.CommitInfo -} - -var _ comet.CommitInfo = (*commitInfoWrapper)(nil) - -func (c commitInfoWrapper) Round() int32 { - return c.CommitInfo.Round -} - -func (c commitInfoWrapper) Votes() comet.VoteInfos { - return abciVoteInfoWrapper{c.CommitInfo.Votes} -} - -// abciVoteInfoWrapper is a wrapper around abci.VoteInfo that implements VoteInfos interface -type abciVoteInfoWrapper struct { - votes []abci.VoteInfo -} - -var _ comet.VoteInfos = (*abciVoteInfoWrapper)(nil) - -func (e abciVoteInfoWrapper) Len() int { - return len(e.votes) -} - -func (e abciVoteInfoWrapper) Get(i int) comet.VoteInfo { - return voteInfoWrapper{e.votes[i]} -} - -// voteInfoWrapper is a wrapper around abci.VoteInfo that implements VoteInfo interface -type voteInfoWrapper struct { - abci.VoteInfo -} - -var _ comet.VoteInfo = (*voteInfoWrapper)(nil) - -func (v voteInfoWrapper) GetBlockIDFlag() comet.BlockIDFlag { - return comet.BlockIDFlag(v.VoteInfo.BlockIdFlag) -} - -func (v voteInfoWrapper) Validator() comet.Validator { - return validatorWrapper{v.VoteInfo.Validator} -} - -// validatorWrapper is a wrapper around abci.Validator that implements Validator interface -type validatorWrapper struct { - abci.Validator -} - -var _ comet.Validator = (*validatorWrapper)(nil) - -func (v validatorWrapper) Address() []byte { - return v.Validator.Address -} - -func (v validatorWrapper) Power() int64 { - return v.Validator.Power -} - -type misbehaviorWrapper struct { - abci.Misbehavior -} - -func (m misbehaviorWrapper) Type() comet.MisbehaviorType { - return comet.MisbehaviorType(m.Misbehavior.Type) -} - -func (m misbehaviorWrapper) Height() int64 { - return m.Misbehavior.Height -} - -func (m misbehaviorWrapper) Validator() comet.Validator { - return validatorWrapper{m.Misbehavior.Validator} -} - -func (m misbehaviorWrapper) Time() time.Time { - return m.Misbehavior.Time -} - -func (m misbehaviorWrapper) TotalVotingPower() int64 { - return m.Misbehavior.TotalVotingPower -} - -type prepareProposalInfo struct { - *abci.RequestPrepareProposal -} - -var _ comet.BlockInfo = (*prepareProposalInfo)(nil) - -func (r prepareProposalInfo) GetEvidence() comet.EvidenceList { - return evidenceWrapper{r.Misbehavior} -} - -func (r prepareProposalInfo) GetValidatorsHash() []byte { - return r.NextValidatorsHash -} - -func (r prepareProposalInfo) GetProposerAddress() []byte { - return r.RequestPrepareProposal.ProposerAddress -} - -func (r prepareProposalInfo) GetLastCommit() comet.CommitInfo { - return extendedCommitInfoWrapper{r.RequestPrepareProposal.LocalLastCommit} -} - -var _ comet.BlockInfo = (*prepareProposalInfo)(nil) - -type extendedCommitInfoWrapper struct { - abci.ExtendedCommitInfo -} - -var _ comet.CommitInfo = (*extendedCommitInfoWrapper)(nil) - -func (e extendedCommitInfoWrapper) Round() int32 { - return e.ExtendedCommitInfo.Round -} - -func (e extendedCommitInfoWrapper) Votes() comet.VoteInfos { - return extendedVoteInfoWrapperList{e.ExtendedCommitInfo.Votes} -} - -type extendedVoteInfoWrapperList struct { - votes []abci.ExtendedVoteInfo -} - -var _ comet.VoteInfos = (*extendedVoteInfoWrapperList)(nil) - -func (e extendedVoteInfoWrapperList) Len() int { - return len(e.votes) -} - -func (e extendedVoteInfoWrapperList) Get(i int) comet.VoteInfo { - return extendedVoteInfoWrapper{e.votes[i]} -} - -type extendedVoteInfoWrapper struct { - abci.ExtendedVoteInfo -} - -var _ comet.VoteInfo = (*extendedVoteInfoWrapper)(nil) - -func (e extendedVoteInfoWrapper) GetBlockIDFlag() comet.BlockIDFlag { - return 
comet.BlockIDFlag(e.ExtendedVoteInfo.BlockIdFlag) -} - -func (e extendedVoteInfoWrapper) Validator() comet.Validator { - return validatorWrapper{e.ExtendedVoteInfo.Validator} -} diff --git a/baseapp/internal/protocompat/protocompat.go b/baseapp/internal/protocompat/protocompat.go deleted file mode 100644 index b2a97f8890..0000000000 --- a/baseapp/internal/protocompat/protocompat.go +++ /dev/null @@ -1,222 +0,0 @@ -package protocompat - -import ( - "context" - "fmt" - "reflect" - - gogoproto "github.com/cosmos/gogoproto/proto" - "google.golang.org/grpc" - proto2 "google.golang.org/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - "google.golang.org/protobuf/runtime/protoiface" - - "github.com/cosmos/cosmos-sdk/codec" -) - -var ( - gogoType = reflect.TypeOf((*gogoproto.Message)(nil)).Elem() - protov2Type = reflect.TypeOf((*proto2.Message)(nil)).Elem() - protov2MarshalOpts = proto2.MarshalOptions{Deterministic: true} -) - -type Handler = func(ctx context.Context, request, response protoiface.MessageV1) error - -func MakeHybridHandler(cdc codec.BinaryCodec, sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{}) (Handler, error) { - methodFullName := protoreflect.FullName(fmt.Sprintf("%s.%s", sd.ServiceName, method.MethodName)) - desc, err := gogoproto.HybridResolver.FindDescriptorByName(methodFullName) - if err != nil { - return nil, err - } - methodDesc, ok := desc.(protoreflect.MethodDescriptor) - if !ok { - return nil, fmt.Errorf("invalid method descriptor %s", methodFullName) - } - - isProtov2Handler, err := isProtov2(method) - if err != nil { - return nil, err - } - if isProtov2Handler { - return makeProtoV2HybridHandler(methodDesc, cdc, method, handler) - } - return makeGogoHybridHandler(methodDesc, cdc, method, handler) -} - -// makeProtoV2HybridHandler returns a handler that can handle both gogo and protov2 messages. -func makeProtoV2HybridHandler(prefMethod protoreflect.MethodDescriptor, cdc codec.BinaryCodec, method grpc.MethodDesc, handler any) (Handler, error) { - // it's a protov2 handler, if a gogo counterparty is not found we cannot handle gogo messages. - gogoExists := gogoproto.MessageType(string(prefMethod.Output().FullName())) != nil - if !gogoExists { - return func(ctx context.Context, inReq, outResp protoiface.MessageV1) error { - protov2Request, ok := inReq.(proto2.Message) - if !ok { - return fmt.Errorf("invalid request type %T, method %s does not accept gogoproto messages", inReq, prefMethod.FullName()) - } - resp, err := method.Handler(handler, ctx, func(msg any) error { - proto2.Merge(msg.(proto2.Message), protov2Request) - return nil - }, nil) - if err != nil { - return err - } - // merge on the resp - proto2.Merge(outResp.(proto2.Message), resp.(proto2.Message)) - return nil - }, nil - } - return func(ctx context.Context, inReq, outResp protoiface.MessageV1) error { - // we check if the request is a protov2 message. - switch m := inReq.(type) { - case proto2.Message: - // we can just call the handler after making a copy of the message, for safety reasons. - resp, err := method.Handler(handler, ctx, func(msg any) error { - proto2.Merge(msg.(proto2.Message), m) - return nil - }, nil) - if err != nil { - return err - } - // merge on the resp - proto2.Merge(outResp.(proto2.Message), resp.(proto2.Message)) - return nil - case gogoproto.Message: - // we need to marshal and unmarshal the request. 
- requestBytes, err := cdc.Marshal(m) - if err != nil { - return err - } - resp, err := method.Handler(handler, ctx, func(msg any) error { - // unmarshal request into the message. - return proto2.Unmarshal(requestBytes, msg.(proto2.Message)) - }, nil) - if err != nil { - return err - } - // the response is a protov2 message, so we cannot just return it. - // since the request came as gogoproto, we expect the response - // to also be gogoproto. - respBytes, err := protov2MarshalOpts.Marshal(resp.(proto2.Message)) - if err != nil { - return err - } - - // unmarshal response into a gogo message. - return cdc.Unmarshal(respBytes, outResp.(gogoproto.Message)) - default: - panic("unreachable") - } - }, nil -} - -func makeGogoHybridHandler(prefMethod protoreflect.MethodDescriptor, cdc codec.BinaryCodec, method grpc.MethodDesc, handler any) (Handler, error) { - // it's a gogo handler, we check if the existing protov2 counterparty exists. - _, err := protoregistry.GlobalTypes.FindMessageByName(prefMethod.Output().FullName()) - if err != nil { - // this can only be a gogo message. - return func(ctx context.Context, inReq, outResp protoiface.MessageV1) error { - _, ok := inReq.(proto2.Message) - if ok { - return fmt.Errorf("invalid request type %T, method %s does not accept protov2 messages", inReq, prefMethod.FullName()) - } - resp, err := method.Handler(handler, ctx, func(msg any) error { - // merge! ref: https://github.com/cosmos/cosmos-sdk/issues/18003 - gogoproto.Merge(msg.(gogoproto.Message), inReq) - return nil - }, nil) - if err != nil { - return err - } - // merge resp, ref: https://github.com/cosmos/cosmos-sdk/issues/18003 - gogoproto.Merge(outResp.(gogoproto.Message), resp.(gogoproto.Message)) - return nil - }, nil - } - // this is a gogo handler, and we have a protov2 counterparty. - return func(ctx context.Context, inReq, outResp protoiface.MessageV1) error { - switch m := inReq.(type) { - case proto2.Message: - // we need to marshal and unmarshal the request. - requestBytes, err := protov2MarshalOpts.Marshal(m) - if err != nil { - return err - } - resp, err := method.Handler(handler, ctx, func(msg any) error { - // unmarshal request into the message. - return cdc.Unmarshal(requestBytes, msg.(gogoproto.Message)) - }, nil) - if err != nil { - return err - } - // the response is a gogo message, so we cannot just return it. - // since the request came as protov2, we expect the response - // to also be protov2. - respBytes, err := cdc.Marshal(resp.(gogoproto.Message)) - if err != nil { - return err - } - // now we unmarshal back into a protov2 message. - return proto2.Unmarshal(respBytes, outResp.(proto2.Message)) - case gogoproto.Message: - // we can just call the handler after making a copy of the message, for safety reasons. - resp, err := method.Handler(handler, ctx, func(msg any) error { - // ref: https://github.com/cosmos/cosmos-sdk/issues/18003 - gogoproto.Merge(msg.(gogoproto.Message), m) - return nil - }, nil) - if err != nil { - return err - } - // merge on the resp, ref: https://github.com/cosmos/cosmos-sdk/issues/18003 - gogoproto.Merge(outResp.(gogoproto.Message), resp.(gogoproto.Message)) - return nil - default: - panic("unreachable") - } - }, nil -} - -// isProtov2 returns true if the given method accepts protov2 messages. -// Returns false if it does not. -// It uses the decoder function passed to the method handler to determine -// the type. 
Since the decoder function is passed in by the concrete implementer the expected -// message where bytes are unmarshaled to, we can use that to determine the type. -func isProtov2(md grpc.MethodDesc) (isV2Type bool, err error) { - pullRequestType := func(msg interface{}) error { - typ := reflect.TypeOf(msg) - switch { - case typ.Implements(protov2Type): - isV2Type = true - return nil - case typ.Implements(gogoType): - isV2Type = false - return nil - default: - err = fmt.Errorf("invalid request type %T, expected protov2 or gogo message", msg) - return nil - } - } - // doNotExecute is a dummy handler that stops the request execution. - doNotExecute := func(_ context.Context, _ any, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (any, error) { - return nil, nil - } - // we are allowed to pass in a nil context and nil request, since we are not actually executing the request. - // this is made possible by the doNotExecute function which immediately returns without calling other handlers. - _, _ = md.Handler(nil, nil, pullRequestType, doNotExecute) - return -} - -// RequestFullNameFromMethodDesc returns the fully-qualified name of the request message of the provided service's method. -func RequestFullNameFromMethodDesc(sd *grpc.ServiceDesc, method grpc.MethodDesc) (protoreflect.FullName, error) { - methodFullName := protoreflect.FullName(fmt.Sprintf("%s.%s", sd.ServiceName, method.MethodName)) - desc, err := gogoproto.HybridResolver.FindDescriptorByName(methodFullName) - if err != nil { - return "", fmt.Errorf("cannot find method descriptor %s", methodFullName) - } - methodDesc, ok := desc.(protoreflect.MethodDescriptor) - if !ok { - return "", fmt.Errorf("invalid method descriptor %s", methodFullName) - } - return methodDesc.Input().FullName(), nil -} diff --git a/baseapp/msg_service_router.go b/baseapp/msg_service_router.go deleted file mode 100644 index 126e0f65e5..0000000000 --- a/baseapp/msg_service_router.go +++ /dev/null @@ -1,221 +0,0 @@ -package baseapp - -import ( - "context" - "fmt" - - gogogrpc "github.com/cosmos/gogoproto/grpc" - "github.com/cosmos/gogoproto/proto" - "google.golang.org/grpc" - "google.golang.org/protobuf/runtime/protoiface" - - errorsmod "cosmossdk.io/errors" - - "github.com/cosmos/cosmos-sdk/baseapp/internal/protocompat" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -// MessageRouter ADR 031 request type routing -// https://github.com/cosmos/cosmos-sdk/blob/main/docs/architecture/adr-031-msg-service.md -type MessageRouter interface { - Handler(msg sdk.Msg) MsgServiceHandler - HandlerByTypeURL(typeURL string) MsgServiceHandler -} - -// MsgServiceRouter routes fully-qualified Msg service methods to their handler. -type MsgServiceRouter struct { - interfaceRegistry codectypes.InterfaceRegistry - routes map[string]MsgServiceHandler - hybridHandlers map[string]func(ctx context.Context, req, resp protoiface.MessageV1) error - circuitBreaker CircuitBreaker -} - -var _ gogogrpc.Server = &MsgServiceRouter{} - -// NewMsgServiceRouter creates a new MsgServiceRouter. 
-func NewMsgServiceRouter() *MsgServiceRouter { - return &MsgServiceRouter{ - routes: map[string]MsgServiceHandler{}, - hybridHandlers: map[string]func(ctx context.Context, req, resp protoiface.MessageV1) error{}, - } -} - -func (msr *MsgServiceRouter) SetCircuit(cb CircuitBreaker) { - msr.circuitBreaker = cb -} - -// MsgServiceHandler defines a function type which handles Msg service message. -type MsgServiceHandler = func(ctx sdk.Context, req sdk.Msg) (*sdk.Result, error) - -// Handler returns the MsgServiceHandler for a given msg or nil if not found. -func (msr *MsgServiceRouter) Handler(msg sdk.Msg) MsgServiceHandler { - return msr.routes[sdk.MsgTypeURL(msg)] -} - -// HandlerByTypeURL returns the MsgServiceHandler for a given query route path or nil -// if not found. -func (msr *MsgServiceRouter) HandlerByTypeURL(typeURL string) MsgServiceHandler { - return msr.routes[typeURL] -} - -// RegisterService implements the gRPC Server.RegisterService method. sd is a gRPC -// service description, handler is an object which implements that gRPC service. -// -// This function PANICs: -// - if it is called before the service `Msg`s have been registered using -// RegisterInterfaces, -// - or if a service is being registered twice. -func (msr *MsgServiceRouter) RegisterService(sd *grpc.ServiceDesc, handler interface{}) { - // Adds a top-level query handler based on the gRPC service name. - for _, method := range sd.Methods { - err := msr.registerMsgServiceHandler(sd, method, handler) - if err != nil { - panic(err) - } - err = msr.registerHybridHandler(sd, method, handler) - if err != nil { - panic(err) - } - } -} - -func (msr *MsgServiceRouter) HybridHandlerByMsgName(msgName string) func(ctx context.Context, req, resp protoiface.MessageV1) error { - return msr.hybridHandlers[msgName] -} - -func (msr *MsgServiceRouter) registerHybridHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{}) error { - inputName, err := protocompat.RequestFullNameFromMethodDesc(sd, method) - if err != nil { - return err - } - cdc := codec.NewProtoCodec(msr.interfaceRegistry) - hybridHandler, err := protocompat.MakeHybridHandler(cdc, sd, method, handler) - if err != nil { - return err - } - // if circuit breaker is not nil, then we decorate the hybrid handler with the circuit breaker - if msr.circuitBreaker == nil { - msr.hybridHandlers[string(inputName)] = hybridHandler - return nil - } - // decorate the hybrid handler with the circuit breaker - circuitBreakerHybridHandler := func(ctx context.Context, req, resp protoiface.MessageV1) error { - messageName := codectypes.MsgTypeURL(req) - allowed, err := msr.circuitBreaker.IsAllowed(ctx, messageName) - if err != nil { - return err - } - if !allowed { - return fmt.Errorf("circuit breaker disallows execution of message %s", messageName) - } - return hybridHandler(ctx, req, resp) - } - msr.hybridHandlers[string(inputName)] = circuitBreakerHybridHandler - return nil -} - -func (msr *MsgServiceRouter) registerMsgServiceHandler(sd *grpc.ServiceDesc, method grpc.MethodDesc, handler interface{}) error { - fqMethod := fmt.Sprintf("/%s/%s", sd.ServiceName, method.MethodName) - methodHandler := method.Handler - - var requestTypeName string - - // NOTE: This is how we pull the concrete request type for each handler for registering in the InterfaceRegistry. - // This approach is maybe a bit hacky, but less hacky than reflecting on the handler object itself. - // We use a no-op interceptor to avoid actually calling into the handler itself. 
- _, _ = methodHandler(nil, context.Background(), func(i interface{}) error { - msg, ok := i.(sdk.Msg) - if !ok { - // We panic here because there is no other alternative and the app cannot be initialized correctly - // this should only happen if there is a problem with code generation in which case the app won't - // work correctly anyway. - panic(fmt.Errorf("unable to register service method %s: %T does not implement sdk.Msg", fqMethod, i)) - } - - requestTypeName = sdk.MsgTypeURL(msg) - return nil - }, noopInterceptor) - - // Check that the service Msg fully-qualified method name has already - // been registered (via RegisterInterfaces). If the user registers a - // service without registering according service Msg type, there might be - // some unexpected behavior down the road. Since we can't return an error - // (`Server.RegisterService` interface restriction) we panic (at startup). - reqType, err := msr.interfaceRegistry.Resolve(requestTypeName) - if err != nil || reqType == nil { - return fmt.Errorf( - "type_url %s has not been registered yet. "+ - "Before calling RegisterService, you must register all interfaces by calling the `RegisterInterfaces` "+ - "method on module.BasicManager. Each module should call `msgservice.RegisterMsgServiceDesc` inside its "+ - "`RegisterInterfaces` method with the `_Msg_serviceDesc` generated by proto-gen", - requestTypeName, - ) - } - - // Check that each service is only registered once. If a service is - // registered more than once, then we should error. Since we can't - // return an error (`Server.RegisterService` interface restriction) we - // panic (at startup). - _, found := msr.routes[requestTypeName] - if found { - return fmt.Errorf( - "msg service %s has already been registered. Please make sure to only register each service once. "+ - "This usually means that there are conflicting modules registering the same msg service", - fqMethod, - ) - } - - msr.routes[requestTypeName] = func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - ctx = ctx.WithEventManager(sdk.NewEventManager()) - interceptor := func(goCtx context.Context, _ interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - goCtx = context.WithValue(goCtx, sdk.SdkContextKey, ctx) - return handler(goCtx, msg) - } - - if m, ok := msg.(sdk.HasValidateBasic); ok { - if err := m.ValidateBasic(); err != nil { - return nil, err - } - } - - if msr.circuitBreaker != nil { - msgURL := sdk.MsgTypeURL(msg) - isAllowed, err := msr.circuitBreaker.IsAllowed(ctx, msgURL) - if err != nil { - return nil, err - } - - if !isAllowed { - return nil, fmt.Errorf("circuit breaker disables execution of this message: %s", msgURL) - } - } - - // Call the method handler from the service description with the handler object. - // We don't do any decoding here because the decoding was already done. - res, err := methodHandler(handler, ctx, noopDecoder, interceptor) - if err != nil { - return nil, err - } - - resMsg, ok := res.(proto.Message) - if !ok { - return nil, errorsmod.Wrapf(sdkerrors.ErrInvalidType, "Expecting proto.Message, got %T", resMsg) - } - - return sdk.WrapServiceResult(ctx, resMsg, err) - } - return nil -} - -// SetInterfaceRegistry sets the interface registry for the router. 
-func (msr *MsgServiceRouter) SetInterfaceRegistry(interfaceRegistry codectypes.InterfaceRegistry) { - msr.interfaceRegistry = interfaceRegistry -} - -func noopDecoder(_ interface{}) error { return nil } -func noopInterceptor(_ context.Context, _ interface{}, _ *grpc.UnaryServerInfo, _ grpc.UnaryHandler) (interface{}, error) { - return nil, nil -} diff --git a/baseapp/msg_service_router_test.go b/baseapp/msg_service_router_test.go deleted file mode 100644 index 8ddb490e99..0000000000 --- a/baseapp/msg_service_router_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package baseapp_test - -import ( - "context" - "testing" - - abci "github.com/cometbft/cometbft/abci/types" - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - "cosmossdk.io/depinject" - "cosmossdk.io/log" - - "github.com/cosmos/cosmos-sdk/client/tx" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" - "github.com/cosmos/cosmos-sdk/runtime" - "github.com/cosmos/cosmos-sdk/testutil/testdata" - "github.com/cosmos/cosmos-sdk/types/tx/signing" - authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" - authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" -) - -func TestRegisterMsgService(t *testing.T) { - // Setup baseapp. - var ( - appBuilder *runtime.AppBuilder - registry codectypes.InterfaceRegistry - ) - err := depinject.Inject( - depinject.Configs( - makeMinimalConfig(), - depinject.Supply(log.NewTestLogger(t)), - ), &appBuilder, ®istry) - require.NoError(t, err) - app := appBuilder.Build(dbm.NewMemDB(), nil) - - require.Panics(t, func() { - testdata.RegisterMsgServer( - app.MsgServiceRouter(), - testdata.MsgServerImpl{}, - ) - }) - - // Register testdata Msg services, and rerun `RegisterMsgService`. - testdata.RegisterInterfaces(registry) - - require.NotPanics(t, func() { - testdata.RegisterMsgServer( - app.MsgServiceRouter(), - testdata.MsgServerImpl{}, - ) - }) -} - -func TestRegisterMsgServiceTwice(t *testing.T) { - // Setup baseapp. - var ( - appBuilder *runtime.AppBuilder - registry codectypes.InterfaceRegistry - ) - err := depinject.Inject( - depinject.Configs( - makeMinimalConfig(), - depinject.Supply(log.NewTestLogger(t)), - ), &appBuilder, ®istry) - require.NoError(t, err) - db := dbm.NewMemDB() - app := appBuilder.Build(db, nil) - testdata.RegisterInterfaces(registry) - - // First time registering service shouldn't panic. - require.NotPanics(t, func() { - testdata.RegisterMsgServer( - app.MsgServiceRouter(), - testdata.MsgServerImpl{}, - ) - }) - - // Second time should panic. - require.Panics(t, func() { - testdata.RegisterMsgServer( - app.MsgServiceRouter(), - testdata.MsgServerImpl{}, - ) - }) -} - -func TestHybridHandlerByMsgName(t *testing.T) { - // Setup baseapp and router. 
- var ( - appBuilder *runtime.AppBuilder - registry codectypes.InterfaceRegistry - ) - err := depinject.Inject( - depinject.Configs( - makeMinimalConfig(), - depinject.Supply(log.NewTestLogger(t)), - ), &appBuilder, ®istry) - require.NoError(t, err) - db := dbm.NewMemDB() - app := appBuilder.Build(db, nil) - testdata.RegisterInterfaces(registry) - - testdata.RegisterMsgServer( - app.MsgServiceRouter(), - testdata.MsgServerImpl{}, - ) - - handler := app.MsgServiceRouter().HybridHandlerByMsgName("testpb.MsgCreateDog") - - require.NotNil(t, handler) - require.NoError(t, app.Init()) - ctx := app.NewContext(true) - resp := new(testdata.MsgCreateDogResponse) - err = handler(ctx, &testdata.MsgCreateDog{ - Dog: &testdata.Dog{Name: "Spot"}, - Owner: "me", - }, resp) - require.NoError(t, err) - require.Equal(t, resp.Name, "Spot") -} - -func TestMsgService(t *testing.T) { - priv, _, _ := testdata.KeyTestPubAddr() - - var ( - appBuilder *runtime.AppBuilder - cdc codec.Codec - interfaceRegistry codectypes.InterfaceRegistry - ) - err := depinject.Inject( - depinject.Configs( - makeMinimalConfig(), - depinject.Supply(log.NewNopLogger()), - ), &appBuilder, &cdc, &interfaceRegistry) - require.NoError(t, err) - app := appBuilder.Build(dbm.NewMemDB(), nil) - - // patch in TxConfig instead of using an output from x/auth/tx - txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) - // set the TxDecoder in the BaseApp for minimal tx simulations - app.SetTxDecoder(txConfig.TxDecoder()) - - defaultSignMode, err := authsigning.APISignModeToInternal(txConfig.SignModeHandler().DefaultMode()) - require.NoError(t, err) - - testdata.RegisterInterfaces(interfaceRegistry) - testdata.RegisterMsgServer( - app.MsgServiceRouter(), - testdata.MsgServerImpl{}, - ) - _, err = app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1}) - require.NoError(t, err) - - _, _, addr := testdata.KeyTestPubAddr() - msg := testdata.MsgCreateDog{ - Dog: &testdata.Dog{Name: "Spot"}, - Owner: addr.String(), - } - - txBuilder := txConfig.NewTxBuilder() - txBuilder.SetFeeAmount(testdata.NewTestFeeAmount()) - txBuilder.SetGasLimit(testdata.NewTestGasLimit()) - err = txBuilder.SetMsgs(&msg) - require.NoError(t, err) - - // First round: we gather all the signer infos. We use the "set empty - // signature" hack to do that. - sigV2 := signing.SignatureV2{ - PubKey: priv.PubKey(), - Data: &signing.SingleSignatureData{ - SignMode: defaultSignMode, - Signature: nil, - }, - Sequence: 0, - } - - err = txBuilder.SetSignatures(sigV2) - require.NoError(t, err) - - // Second round: all signer infos are set, so each signer can sign. 
- signerData := authsigning.SignerData{ - ChainID: "test", - AccountNumber: 0, - Sequence: 0, - PubKey: priv.PubKey(), - } - sigV2, err = tx.SignWithPrivKey( - context.TODO(), defaultSignMode, signerData, - txBuilder, priv, txConfig, 0) - require.NoError(t, err) - err = txBuilder.SetSignatures(sigV2) - require.NoError(t, err) - - // Send the tx to the app - txBytes, err := txConfig.TxEncoder()(txBuilder.GetTx()) - require.NoError(t, err) - res, err := app.FinalizeBlock(&abci.RequestFinalizeBlock{Height: 1, Txs: [][]byte{txBytes}}) - require.NoError(t, err) - require.Equal(t, abci.CodeTypeOK, res.TxResults[0].Code, "res=%+v", res) -} diff --git a/baseapp/oe/optimistic_execution.go b/baseapp/oe/optimistic_execution.go deleted file mode 100644 index 2a6d347709..0000000000 --- a/baseapp/oe/optimistic_execution.go +++ /dev/null @@ -1,157 +0,0 @@ -package oe - -import ( - "bytes" - "context" - "encoding/hex" - "math/rand" - "sync" - "time" - - abci "github.com/cometbft/cometbft/abci/types" - - "cosmossdk.io/log" -) - -// FinalizeBlockFunc is the function that is called by the OE to finalize the -// block. It is the same as the one in the ABCI app. -type FinalizeBlockFunc func(context.Context, *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) - -// OptimisticExecution is a struct that contains the OE context. It is used to -// run the FinalizeBlock function in a goroutine, and to abort it if needed. -type OptimisticExecution struct { - finalizeBlockFunc FinalizeBlockFunc // ABCI FinalizeBlock function with a context - logger log.Logger - - mtx sync.Mutex - stopCh chan struct{} - request *abci.RequestFinalizeBlock - response *abci.ResponseFinalizeBlock - err error - cancelFunc func() // cancel function for the context - initialized bool // A boolean value indicating whether the struct has been initialized - - // debugging/testing options - abortRate int // number from 0 to 100 that determines the percentage of OE that should be aborted -} - -// NewOptimisticExecution initializes the Optimistic Execution context but does not start it. -func NewOptimisticExecution(logger log.Logger, fn FinalizeBlockFunc, opts ...func(*OptimisticExecution)) *OptimisticExecution { - logger = logger.With(log.ModuleKey, "oe") - oe := &OptimisticExecution{logger: logger, finalizeBlockFunc: fn} - for _, opt := range opts { - opt(oe) - } - return oe -} - -// WithAbortRate sets the abort rate for the OE. The abort rate is a number from -// 0 to 100 that determines the percentage of OE that should be aborted. -// This is for testing purposes only and must not be used in production. -func WithAbortRate(rate int) func(*OptimisticExecution) { - return func(oe *OptimisticExecution) { - oe.abortRate = rate - } -} - -// Reset resets the OE context. Must be called whenever we want to invalidate -// the current OE. -func (oe *OptimisticExecution) Reset() { - oe.mtx.Lock() - defer oe.mtx.Unlock() - oe.request = nil - oe.response = nil - oe.err = nil - oe.initialized = false -} - -func (oe *OptimisticExecution) Enabled() bool { - return oe != nil -} - -// Initialized returns true if the OE was initialized, meaning that it contains -// a request and it was run or it is running. -func (oe *OptimisticExecution) Initialized() bool { - if oe == nil { - return false - } - oe.mtx.Lock() - defer oe.mtx.Unlock() - - return oe.initialized -} - -// Execute initializes the OE and starts it in a goroutine. 
-func (oe *OptimisticExecution) Execute(req *abci.RequestProcessProposal) { - oe.mtx.Lock() - defer oe.mtx.Unlock() - - oe.stopCh = make(chan struct{}) - oe.request = &abci.RequestFinalizeBlock{ - Txs: req.Txs, - DecidedLastCommit: req.ProposedLastCommit, - Misbehavior: req.Misbehavior, - Hash: req.Hash, - Height: req.Height, - Time: req.Time, - NextValidatorsHash: req.NextValidatorsHash, - ProposerAddress: req.ProposerAddress, - } - - oe.logger.Debug("OE started", "height", req.Height, "hash", hex.EncodeToString(req.Hash), "time", req.Time.String()) - ctx, cancel := context.WithCancel(context.Background()) - oe.cancelFunc = cancel - oe.initialized = true - - go func() { - start := time.Now() - resp, err := oe.finalizeBlockFunc(ctx, oe.request) - oe.mtx.Lock() - executionTime := time.Since(start) - oe.logger.Debug("OE finished", "duration", executionTime.String(), "height", req.Height, "hash", hex.EncodeToString(req.Hash)) - oe.response, oe.err = resp, err - close(oe.stopCh) - oe.mtx.Unlock() - }() -} - -// AbortIfNeeded aborts the OE if the request hash is not the same as the one in -// the running OE. Returns true if the OE was aborted. -func (oe *OptimisticExecution) AbortIfNeeded(reqHash []byte) bool { - if oe == nil { - return false - } - - oe.mtx.Lock() - defer oe.mtx.Unlock() - - if !bytes.Equal(oe.request.Hash, reqHash) { - oe.logger.Error("OE aborted due to hash mismatch", "oe_hash", hex.EncodeToString(oe.request.Hash), "req_hash", hex.EncodeToString(reqHash), "oe_height", oe.request.Height, "req_height", oe.request.Height) - oe.cancelFunc() - return true - } else if oe.abortRate > 0 && rand.Intn(100) < oe.abortRate { - // this is for test purposes only, we can emulate a certain percentage of - // OE needed to be aborted. - oe.cancelFunc() - oe.logger.Error("OE aborted due to test abort rate") - return true - } - - return false -} - -// Abort aborts the OE unconditionally and waits for it to finish. -func (oe *OptimisticExecution) Abort() { - if oe == nil || oe.cancelFunc == nil { - return - } - - oe.cancelFunc() - <-oe.stopCh -} - -// WaitResult waits for the OE to finish and returns the result. 
-func (oe *OptimisticExecution) WaitResult() (*abci.ResponseFinalizeBlock, error) { - <-oe.stopCh - return oe.response, oe.err -} diff --git a/baseapp/oe/optimistic_execution_test.go b/baseapp/oe/optimistic_execution_test.go deleted file mode 100644 index 0b92244783..0000000000 --- a/baseapp/oe/optimistic_execution_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package oe - -import ( - "context" - "errors" - "testing" - - abci "github.com/cometbft/cometbft/abci/types" - "github.com/stretchr/testify/assert" - - "cosmossdk.io/log" -) - -func testFinalizeBlock(_ context.Context, _ *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - return nil, errors.New("test error") -} - -func TestOptimisticExecution(t *testing.T) { - oe := NewOptimisticExecution(log.NewNopLogger(), testFinalizeBlock) - assert.True(t, oe.Enabled()) - oe.Execute(&abci.RequestProcessProposal{ - Hash: []byte("test"), - }) - assert.True(t, oe.Initialized()) - - resp, err := oe.WaitResult() - assert.Nil(t, resp) - assert.EqualError(t, err, "test error") - - assert.False(t, oe.AbortIfNeeded([]byte("test"))) - assert.True(t, oe.AbortIfNeeded([]byte("wrong_hash"))) - - oe.Reset() -} diff --git a/baseapp/options.go b/baseapp/options.go deleted file mode 100644 index c0c9caf058..0000000000 --- a/baseapp/options.go +++ /dev/null @@ -1,359 +0,0 @@ -package baseapp - -import ( - "fmt" - "io" - "math" - - dbm "github.com/cosmos/cosmos-db" - - "cosmossdk.io/store/metrics" - pruningtypes "cosmossdk.io/store/pruning/types" - "cosmossdk.io/store/snapshots" - snapshottypes "cosmossdk.io/store/snapshots/types" - storetypes "cosmossdk.io/store/types" - - "github.com/cosmos/cosmos-sdk/baseapp/oe" - "github.com/cosmos/cosmos-sdk/codec" - "github.com/cosmos/cosmos-sdk/codec/types" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/mempool" -) - -// File for storing in-package BaseApp optional functions, -// for options that need access to non-exported fields of the BaseApp - -// SetPruning sets a pruning option on the multistore associated with the app -func SetPruning(opts pruningtypes.PruningOptions) func(*BaseApp) { - return func(bapp *BaseApp) { bapp.cms.SetPruning(opts) } -} - -// SetMinGasPrices returns an option that sets the minimum gas prices on the app. -func SetMinGasPrices(gasPricesStr string) func(*BaseApp) { - gasPrices, err := sdk.ParseDecCoins(gasPricesStr) - if err != nil { - panic(fmt.Sprintf("invalid minimum gas prices: %v", err)) - } - - return func(bapp *BaseApp) { bapp.setMinGasPrices(gasPrices) } -} - -// SetQueryGasLimit returns an option that sets a gas limit for queries. -func SetQueryGasLimit(queryGasLimit uint64) func(*BaseApp) { - if queryGasLimit == 0 { - queryGasLimit = math.MaxUint64 - } - - return func(bapp *BaseApp) { bapp.queryGasLimit = queryGasLimit } -} - -// SetHaltHeight returns a BaseApp option function that sets the halt block height. -func SetHaltHeight(blockHeight uint64) func(*BaseApp) { - return func(bapp *BaseApp) { bapp.setHaltHeight(blockHeight) } -} - -// SetHaltTime returns a BaseApp option function that sets the halt block time. -func SetHaltTime(haltTime uint64) func(*BaseApp) { - return func(bapp *BaseApp) { bapp.setHaltTime(haltTime) } -} - -// SetMinRetainBlocks returns a BaseApp option function that sets the minimum -// block retention height value when determining which heights to prune during -// ABCI Commit. 
-func SetMinRetainBlocks(minRetainBlocks uint64) func(*BaseApp) { - return func(bapp *BaseApp) { bapp.setMinRetainBlocks(minRetainBlocks) } -} - -// SetTrace will turn on or off trace flag -func SetTrace(trace bool) func(*BaseApp) { - return func(app *BaseApp) { app.setTrace(trace) } -} - -// SetIndexEvents provides a BaseApp option function that sets the events to index. -func SetIndexEvents(ie []string) func(*BaseApp) { - return func(app *BaseApp) { app.setIndexEvents(ie) } -} - -// SetIAVLCacheSize provides a BaseApp option function that sets the size of IAVL cache. -func SetIAVLCacheSize(size int) func(*BaseApp) { - return func(bapp *BaseApp) { bapp.cms.SetIAVLCacheSize(size) } -} - -// SetIAVLDisableFastNode enables(false)/disables(true) fast node usage from the IAVL store. -func SetIAVLDisableFastNode(disable bool) func(*BaseApp) { - return func(bapp *BaseApp) { bapp.cms.SetIAVLDisableFastNode(disable) } -} - -// SetInterBlockCache provides a BaseApp option function that sets the -// inter-block cache. -func SetInterBlockCache(cache storetypes.MultiStorePersistentCache) func(*BaseApp) { - return func(app *BaseApp) { app.setInterBlockCache(cache) } -} - -// SetSnapshot sets the snapshot store. -func SetSnapshot(snapshotStore *snapshots.Store, opts snapshottypes.SnapshotOptions) func(*BaseApp) { - return func(app *BaseApp) { app.SetSnapshot(snapshotStore, opts) } -} - -// SetMempool sets the mempool on BaseApp. -func SetMempool(mempool mempool.Mempool) func(*BaseApp) { - return func(app *BaseApp) { app.SetMempool(mempool) } -} - -// SetChainID sets the chain ID in BaseApp. -func SetChainID(chainID string) func(*BaseApp) { - return func(app *BaseApp) { app.chainID = chainID } -} - -// SetOptimisticExecution enables optimistic execution. -func SetOptimisticExecution(opts ...func(*oe.OptimisticExecution)) func(*BaseApp) { - return func(app *BaseApp) { - app.optimisticExec = oe.NewOptimisticExecution(app.logger, app.internalFinalizeBlock, opts...) - } -} - -func (app *BaseApp) SetName(name string) { - if app.sealed { - panic("SetName() on sealed BaseApp") - } - - app.name = name -} - -// SetParamStore sets a parameter store on the BaseApp. -func (app *BaseApp) SetParamStore(ps ParamStore) { - if app.sealed { - panic("SetParamStore() on sealed BaseApp") - } - - app.paramStore = ps -} - -// SetVersion sets the application's version string. 
-func (app *BaseApp) SetVersion(v string) { - if app.sealed { - panic("SetVersion() on sealed BaseApp") - } - app.version = v -} - -// SetProtocolVersion sets the application's protocol version -func (app *BaseApp) SetProtocolVersion(v uint64) { - app.appVersion = v -} - -func (app *BaseApp) SetDB(db dbm.DB) { - if app.sealed { - panic("SetDB() on sealed BaseApp") - } - - app.db = db -} - -func (app *BaseApp) SetCMS(cms storetypes.CommitMultiStore) { - if app.sealed { - panic("SetEndBlocker() on sealed BaseApp") - } - - app.cms = cms -} - -func (app *BaseApp) SetInitChainer(initChainer sdk.InitChainer) { - if app.sealed { - panic("SetInitChainer() on sealed BaseApp") - } - - app.initChainer = initChainer -} - -func (app *BaseApp) PreBlocker() sdk.PreBlocker { - return app.preBlocker -} - -func (app *BaseApp) SetPreBlocker(preBlocker sdk.PreBlocker) { - if app.sealed { - panic("SetPreBlocker() on sealed BaseApp") - } - - app.preBlocker = preBlocker -} - -func (app *BaseApp) SetBeginBlocker(beginBlocker sdk.BeginBlocker) { - if app.sealed { - panic("SetBeginBlocker() on sealed BaseApp") - } - - app.beginBlocker = beginBlocker -} - -func (app *BaseApp) SetEndBlocker(endBlocker sdk.EndBlocker) { - if app.sealed { - panic("SetEndBlocker() on sealed BaseApp") - } - - app.endBlocker = endBlocker -} - -func (app *BaseApp) SetPrepareCheckStater(prepareCheckStater sdk.PrepareCheckStater) { - if app.sealed { - panic("SetPrepareCheckStater() on sealed BaseApp") - } - - app.prepareCheckStater = prepareCheckStater -} - -func (app *BaseApp) SetPrecommiter(precommiter sdk.Precommiter) { - if app.sealed { - panic("SetPrecommiter() on sealed BaseApp") - } - - app.precommiter = precommiter -} - -func (app *BaseApp) SetAnteHandler(ah sdk.AnteHandler) { - if app.sealed { - panic("SetAnteHandler() on sealed BaseApp") - } - - app.anteHandler = ah -} - -func (app *BaseApp) SetPostHandler(ph sdk.PostHandler) { - if app.sealed { - panic("SetPostHandler() on sealed BaseApp") - } - - app.postHandler = ph -} - -func (app *BaseApp) SetAddrPeerFilter(pf sdk.PeerFilter) { - if app.sealed { - panic("SetAddrPeerFilter() on sealed BaseApp") - } - - app.addrPeerFilter = pf -} - -func (app *BaseApp) SetIDPeerFilter(pf sdk.PeerFilter) { - if app.sealed { - panic("SetIDPeerFilter() on sealed BaseApp") - } - - app.idPeerFilter = pf -} - -func (app *BaseApp) SetFauxMerkleMode() { - if app.sealed { - panic("SetFauxMerkleMode() on sealed BaseApp") - } - - app.fauxMerkleMode = true -} - -// SetCommitMultiStoreTracer sets the store tracer on the BaseApp's underlying -// CommitMultiStore. -func (app *BaseApp) SetCommitMultiStoreTracer(w io.Writer) { - app.cms.SetTracer(w) -} - -// SetStoreLoader allows us to customize the rootMultiStore initialization. -func (app *BaseApp) SetStoreLoader(loader StoreLoader) { - if app.sealed { - panic("SetStoreLoader() on sealed BaseApp") - } - - app.storeLoader = loader -} - -// SetSnapshot sets the snapshot store and options. -func (app *BaseApp) SetSnapshot(snapshotStore *snapshots.Store, opts snapshottypes.SnapshotOptions) { - if app.sealed { - panic("SetSnapshot() on sealed BaseApp") - } - if snapshotStore == nil { - app.snapshotManager = nil - return - } - app.cms.SetSnapshotInterval(opts.Interval) - app.snapshotManager = snapshots.NewManager(snapshotStore, opts, app.cms, nil, app.logger) -} - -// SetInterfaceRegistry sets the InterfaceRegistry. 
-func (app *BaseApp) SetInterfaceRegistry(registry types.InterfaceRegistry) { - app.interfaceRegistry = registry - app.grpcQueryRouter.SetInterfaceRegistry(registry) - app.msgServiceRouter.SetInterfaceRegistry(registry) - app.cdc = codec.NewProtoCodec(registry) -} - -// SetTxDecoder sets the TxDecoder if it wasn't provided in the BaseApp constructor. -func (app *BaseApp) SetTxDecoder(txDecoder sdk.TxDecoder) { - app.txDecoder = txDecoder -} - -// SetTxEncoder sets the TxEncoder if it wasn't provided in the BaseApp constructor. -func (app *BaseApp) SetTxEncoder(txEncoder sdk.TxEncoder) { - app.txEncoder = txEncoder -} - -// SetQueryMultiStore set a alternative MultiStore implementation to support grpc query service. -// -// Ref: https://github.com/cosmos/cosmos-sdk/issues/13317 -func (app *BaseApp) SetQueryMultiStore(ms storetypes.MultiStore) { - app.qms = ms -} - -// SetMempool sets the mempool for the BaseApp and is required for the app to start up. -func (app *BaseApp) SetMempool(mempool mempool.Mempool) { - if app.sealed { - panic("SetMempool() on sealed BaseApp") - } - app.mempool = mempool -} - -// SetProcessProposal sets the process proposal function for the BaseApp. -func (app *BaseApp) SetProcessProposal(handler sdk.ProcessProposalHandler) { - if app.sealed { - panic("SetProcessProposal() on sealed BaseApp") - } - app.processProposal = handler -} - -// SetPrepareProposal sets the prepare proposal function for the BaseApp. -func (app *BaseApp) SetPrepareProposal(handler sdk.PrepareProposalHandler) { - if app.sealed { - panic("SetPrepareProposal() on sealed BaseApp") - } - - app.prepareProposal = handler -} - -func (app *BaseApp) SetExtendVoteHandler(handler sdk.ExtendVoteHandler) { - if app.sealed { - panic("SetExtendVoteHandler() on sealed BaseApp") - } - - app.extendVote = handler -} - -func (app *BaseApp) SetVerifyVoteExtensionHandler(handler sdk.VerifyVoteExtensionHandler) { - if app.sealed { - panic("SetVerifyVoteExtensionHandler() on sealed BaseApp") - } - - app.verifyVoteExt = handler -} - -// SetStoreMetrics sets the prepare proposal function for the BaseApp. -func (app *BaseApp) SetStoreMetrics(gatherer metrics.StoreMetrics) { - if app.sealed { - panic("SetStoreMetrics() on sealed BaseApp") - } - - app.cms.SetMetrics(gatherer) -} - -// SetStreamingManager sets the streaming manager for the BaseApp. -func (app *BaseApp) SetStreamingManager(manager storetypes.StreamingManager) { - app.streamingManager = manager -} diff --git a/baseapp/params.go b/baseapp/params.go deleted file mode 100644 index 329ec1255b..0000000000 --- a/baseapp/params.go +++ /dev/null @@ -1,15 +0,0 @@ -package baseapp - -import ( - "context" - - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" -) - -// ParamStore defines the interface the parameter store used by the BaseApp must -// fulfill. -type ParamStore interface { - Get(ctx context.Context) (cmtproto.ConsensusParams, error) - Has(ctx context.Context) (bool, error) - Set(ctx context.Context, cp cmtproto.ConsensusParams) error -} diff --git a/baseapp/params_legacy.go b/baseapp/params_legacy.go deleted file mode 100644 index 13fd34add7..0000000000 --- a/baseapp/params_legacy.go +++ /dev/null @@ -1,150 +0,0 @@ -/* -Deprecated. - -Legacy types are defined below to aid in the migration of CometBFT consensus -parameters from use of the now deprecated x/params modules to a new dedicated -x/consensus module. 
- -Application developers should ensure that they implement their upgrade handler -correctly such that app.ConsensusParamsKeeper.Set() is called with the values -returned by GetConsensusParams(). - -Example: - - baseAppLegacySS := app.ParamsKeeper.Subspace(baseapp.Paramspace).WithKeyTable(paramstypes.ConsensusParamsKeyTable()) - - app.UpgradeKeeper.SetUpgradeHandler( - UpgradeName, - func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { - if cp := baseapp.GetConsensusParams(ctx, baseAppLegacySS); cp != nil { - app.ConsensusParamsKeeper.Set(ctx, cp) - } else { - ctx.Logger().Info("warning: consensus parameters are undefined; skipping migration", "upgrade", UpgradeName) - } - - return app.ModuleManager.RunMigrations(ctx, app.Configurator(), fromVM) - }, - ) - -Developers can also bypass the use of the legacy Params subspace and set the -values to app.ConsensusParamsKeeper.Set() explicitly. - -Note, for new chains this is not necessary as CometBFT's consensus parameters -will automatically be set for you in InitChain. -*/ -package baseapp - -import ( - "errors" - "fmt" - - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -const Paramspace = "baseapp" - -var ( - ParamStoreKeyBlockParams = []byte("BlockParams") - ParamStoreKeyEvidenceParams = []byte("EvidenceParams") - ParamStoreKeyValidatorParams = []byte("ValidatorParams") -) - -type LegacyParamStore interface { - Get(ctx sdk.Context, key []byte, ptr interface{}) - Has(ctx sdk.Context, key []byte) bool -} - -func ValidateBlockParams(i interface{}) error { - v, ok := i.(cmtproto.BlockParams) - if !ok { - return fmt.Errorf("invalid parameter type: %T", i) - } - - if v.MaxBytes <= 0 { - return fmt.Errorf("block maximum bytes must be positive: %d", v.MaxBytes) - } - - if v.MaxGas < -1 { - return fmt.Errorf("block maximum gas must be greater than or equal to -1: %d", v.MaxGas) - } - - return nil -} - -func ValidateEvidenceParams(i interface{}) error { - v, ok := i.(cmtproto.EvidenceParams) - if !ok { - return fmt.Errorf("invalid parameter type: %T", i) - } - - if v.MaxAgeNumBlocks <= 0 { - return fmt.Errorf("evidence maximum age in blocks must be positive: %d", v.MaxAgeNumBlocks) - } - - if v.MaxAgeDuration <= 0 { - return fmt.Errorf("evidence maximum age time duration must be positive: %v", v.MaxAgeDuration) - } - - if v.MaxBytes < 0 { - return fmt.Errorf("maximum evidence bytes must be non-negative: %v", v.MaxBytes) - } - - return nil -} - -func ValidateValidatorParams(i interface{}) error { - v, ok := i.(cmtproto.ValidatorParams) - if !ok { - return fmt.Errorf("invalid parameter type: %T", i) - } - - if len(v.PubKeyTypes) == 0 { - return errors.New("validator allowed pubkey types must not be empty") - } - - return nil -} - -func GetConsensusParams(ctx sdk.Context, paramStore LegacyParamStore) *cmtproto.ConsensusParams { - if paramStore == nil { - return nil - } - - cp := new(cmtproto.ConsensusParams) - - if paramStore.Has(ctx, ParamStoreKeyBlockParams) { - var bp cmtproto.BlockParams - - paramStore.Get(ctx, ParamStoreKeyBlockParams, &bp) - cp.Block = &bp - } - - if paramStore.Has(ctx, ParamStoreKeyEvidenceParams) { - var ep cmtproto.EvidenceParams - - paramStore.Get(ctx, ParamStoreKeyEvidenceParams, &ep) - cp.Evidence = &ep - } - - if paramStore.Has(ctx, ParamStoreKeyValidatorParams) { - var vp cmtproto.ValidatorParams - - paramStore.Get(ctx, ParamStoreKeyValidatorParams, &vp) - cp.Validator = &vp - } - - return cp -} - -func 
MigrateParams(ctx sdk.Context, lps LegacyParamStore, ps ParamStore) error { - if cp := GetConsensusParams(ctx, lps); cp != nil { - if err := ps.Set(ctx, *cp); err != nil { - return err - } - } else { - ctx.Logger().Info("warning: consensus parameters are undefined; skipping migration") - } - return nil -} diff --git a/baseapp/recovery.go b/baseapp/recovery.go deleted file mode 100644 index ca6cfca062..0000000000 --- a/baseapp/recovery.go +++ /dev/null @@ -1,80 +0,0 @@ -package baseapp - -import ( - "fmt" - "runtime/debug" - - errorsmod "cosmossdk.io/errors" - storetypes "cosmossdk.io/store/types" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -// RecoveryHandler handles recovery() object. -// Return a non-nil error if recoveryObj was processed. -// Return nil if recoveryObj was not processed. -type RecoveryHandler func(recoveryObj interface{}) error - -// recoveryMiddleware is wrapper for RecoveryHandler to create chained recovery handling. -// returns (recoveryMiddleware, nil) if recoveryObj was not processed and should be passed to the next middleware in chain. -// returns (nil, error) if recoveryObj was processed and middleware chain processing should be stopped. -type recoveryMiddleware func(recoveryObj interface{}) (recoveryMiddleware, error) - -// processRecovery processes recoveryMiddleware chain for recovery() object. -// Chain processing stops on non-nil error or when chain is processed. -func processRecovery(recoveryObj interface{}, middleware recoveryMiddleware) error { - if middleware == nil { - return nil - } - - next, err := middleware(recoveryObj) - if err != nil { - return err - } - - return processRecovery(recoveryObj, next) -} - -// newRecoveryMiddleware creates a RecoveryHandler middleware. -func newRecoveryMiddleware(handler RecoveryHandler, next recoveryMiddleware) recoveryMiddleware { - return func(recoveryObj interface{}) (recoveryMiddleware, error) { - if err := handler(recoveryObj); err != nil { - return nil, err - } - - return next, nil - } -} - -// newOutOfGasRecoveryMiddleware creates a standard OutOfGas recovery middleware for app.runTx method. -func newOutOfGasRecoveryMiddleware(gasWanted uint64, ctx sdk.Context, next recoveryMiddleware) recoveryMiddleware { - handler := func(recoveryObj interface{}) error { - err, ok := recoveryObj.(storetypes.ErrorOutOfGas) - if !ok { - return nil - } - - return errorsmod.Wrap( - sdkerrors.ErrOutOfGas, fmt.Sprintf( - "out of gas in location: %v; gasWanted: %d, gasUsed: %d", - err.Descriptor, gasWanted, ctx.GasMeter().GasConsumed(), - ), - ) - } - - return newRecoveryMiddleware(handler, next) -} - -// newDefaultRecoveryMiddleware creates a default (last in chain) recovery middleware for app.runTx method. 
-func newDefaultRecoveryMiddleware() recoveryMiddleware { - handler := func(recoveryObj interface{}) error { - return errorsmod.Wrap( - sdkerrors.ErrPanic, fmt.Sprintf( - "recovered: %v\nstack:\n%v", recoveryObj, string(debug.Stack()), - ), - ) - } - - return newRecoveryMiddleware(handler, nil) -} diff --git a/baseapp/recovery_test.go b/baseapp/recovery_test.go deleted file mode 100644 index b75892c638..0000000000 --- a/baseapp/recovery_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package baseapp - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -// Test that recovery chain produces expected error at specific middleware layer -func TestRecoveryChain(t *testing.T) { - createError := func(id int) error { - return fmt.Errorf("error from id: %d", id) - } - - createHandler := func(id int, handle bool) RecoveryHandler { - return func(_ interface{}) error { - if handle { - return createError(id) - } - return nil - } - } - - // check recovery chain [1] -> 2 -> 3 - { - mw := newRecoveryMiddleware(createHandler(3, false), nil) - mw = newRecoveryMiddleware(createHandler(2, false), mw) - mw = newRecoveryMiddleware(createHandler(1, true), mw) - receivedErr := processRecovery(nil, mw) - - require.Equal(t, createError(1), receivedErr) - } - - // check recovery chain 1 -> [2] -> 3 - { - mw := newRecoveryMiddleware(createHandler(3, false), nil) - mw = newRecoveryMiddleware(createHandler(2, true), mw) - mw = newRecoveryMiddleware(createHandler(1, false), mw) - receivedErr := processRecovery(nil, mw) - - require.Equal(t, createError(2), receivedErr) - } - - // check recovery chain 1 -> 2 -> [3] - { - mw := newRecoveryMiddleware(createHandler(3, true), nil) - mw = newRecoveryMiddleware(createHandler(2, false), mw) - mw = newRecoveryMiddleware(createHandler(1, false), mw) - receivedErr := processRecovery(nil, mw) - - require.Equal(t, createError(3), receivedErr) - } - - // check recovery chain 1 -> 2 -> 3 - { - mw := newRecoveryMiddleware(createHandler(3, false), nil) - mw = newRecoveryMiddleware(createHandler(2, false), mw) - mw = newRecoveryMiddleware(createHandler(1, false), mw) - receivedErr := processRecovery(nil, mw) - - require.Nil(t, receivedErr) - } -} diff --git a/baseapp/snapshot_test.go b/baseapp/snapshot_test.go deleted file mode 100644 index 3051177e47..0000000000 --- a/baseapp/snapshot_test.go +++ /dev/null @@ -1,347 +0,0 @@ -package baseapp_test - -import ( - "context" - "fmt" - "testing" - - abci "github.com/cometbft/cometbft/abci/types" - "github.com/stretchr/testify/require" - - pruningtypes "cosmossdk.io/store/pruning/types" - snapshottypes "cosmossdk.io/store/snapshots/types" -) - -func TestABCI_ListSnapshots(t *testing.T) { - ssCfg := SnapshotsConfig{ - blocks: 5, - blockTxs: 4, - snapshotInterval: 2, - snapshotKeepRecent: 2, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - } - - suite := NewBaseAppSuiteWithSnapshots(t, ssCfg) - - resp, err := suite.baseApp.ListSnapshots(&abci.RequestListSnapshots{}) - require.NoError(t, err) - for _, s := range resp.Snapshots { - require.NotEmpty(t, s.Hash) - require.NotEmpty(t, s.Metadata) - - s.Hash = nil - s.Metadata = nil - } - - require.Equal(t, &abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{ - {Height: 4, Format: snapshottypes.CurrentFormat, Chunks: 2}, - {Height: 2, Format: snapshottypes.CurrentFormat, Chunks: 1}, - }}, resp) -} - -func TestABCI_SnapshotWithPruning(t *testing.T) { - testCases := map[string]struct { - ssCfg SnapshotsConfig - expectedSnapshots []*abci.Snapshot - }{ - 
"prune nothing with snapshot": { - ssCfg: SnapshotsConfig{ - blocks: 20, - blockTxs: 2, - snapshotInterval: 5, - snapshotKeepRecent: 1, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - }, - expectedSnapshots: []*abci.Snapshot{ - {Height: 20, Format: snapshottypes.CurrentFormat, Chunks: 5}, - }, - }, - "prune everything with snapshot": { - ssCfg: SnapshotsConfig{ - blocks: 20, - blockTxs: 2, - snapshotInterval: 5, - snapshotKeepRecent: 1, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), - }, - expectedSnapshots: []*abci.Snapshot{ - {Height: 20, Format: snapshottypes.CurrentFormat, Chunks: 5}, - }, - }, - "default pruning with snapshot": { - ssCfg: SnapshotsConfig{ - blocks: 20, - blockTxs: 2, - snapshotInterval: 5, - snapshotKeepRecent: 1, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), - }, - expectedSnapshots: []*abci.Snapshot{ - {Height: 20, Format: snapshottypes.CurrentFormat, Chunks: 5}, - }, - }, - "custom": { - ssCfg: SnapshotsConfig{ - blocks: 25, - blockTxs: 2, - snapshotInterval: 5, - snapshotKeepRecent: 2, - pruningOpts: pruningtypes.NewCustomPruningOptions(12, 12), - }, - expectedSnapshots: []*abci.Snapshot{ - {Height: 25, Format: snapshottypes.CurrentFormat, Chunks: 6}, - {Height: 20, Format: snapshottypes.CurrentFormat, Chunks: 5}, - }, - }, - "no snapshots": { - ssCfg: SnapshotsConfig{ - blocks: 10, - blockTxs: 2, - snapshotInterval: 0, // 0 implies disable snapshots - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - }, - expectedSnapshots: []*abci.Snapshot{}, - }, - "keep all snapshots": { - ssCfg: SnapshotsConfig{ - blocks: 10, - blockTxs: 2, - snapshotInterval: 3, - snapshotKeepRecent: 0, // 0 implies keep all snapshots - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - }, - expectedSnapshots: []*abci.Snapshot{ - {Height: 9, Format: snapshottypes.CurrentFormat, Chunks: 2}, - {Height: 6, Format: snapshottypes.CurrentFormat, Chunks: 2}, - {Height: 3, Format: snapshottypes.CurrentFormat, Chunks: 1}, - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - suite := NewBaseAppSuiteWithSnapshots(t, tc.ssCfg) - - resp, err := suite.baseApp.ListSnapshots(&abci.RequestListSnapshots{}) - require.NoError(t, err) - for _, s := range resp.Snapshots { - require.NotEmpty(t, s.Hash) - require.NotEmpty(t, s.Metadata) - - s.Hash = nil - s.Metadata = nil - } - - require.Equal(t, &abci.ResponseListSnapshots{Snapshots: tc.expectedSnapshots}, resp) - - // Validate that heights were pruned correctly by querying the state at the last height that should be present relative to latest - // and the first height that should be pruned. 
- // - // Exceptions: - // * Prune nothing: should be able to query all heights (we only test first and latest) - // * Prune default: should be able to query all heights (we only test first and latest) - // * The reason for default behaving this way is that we only commit 20 heights but default has 100_000 keep-recent - var lastExistingHeight int64 - if tc.ssCfg.pruningOpts.GetPruningStrategy() == pruningtypes.PruningNothing || tc.ssCfg.pruningOpts.GetPruningStrategy() == pruningtypes.PruningDefault { - lastExistingHeight = 1 - } else { - // Integer division rounds down so by multiplying back we get the last height at which we pruned - lastExistingHeight = int64((tc.ssCfg.blocks/tc.ssCfg.pruningOpts.Interval)*tc.ssCfg.pruningOpts.Interval - tc.ssCfg.pruningOpts.KeepRecent) - } - - // Query 1 - res, err := suite.baseApp.Query(context.TODO(), &abci.RequestQuery{Path: fmt.Sprintf("/store/%s/key", capKey2.Name()), Data: []byte("0"), Height: lastExistingHeight}) - require.NoError(t, err) - require.NotNil(t, res, "height: %d", lastExistingHeight) - require.NotNil(t, res.Value, "height: %d", lastExistingHeight) - - // Query 2 - res, err = suite.baseApp.Query(context.TODO(), &abci.RequestQuery{Path: fmt.Sprintf("/store/%s/key", capKey2.Name()), Data: []byte("0"), Height: lastExistingHeight - 1}) - require.NoError(t, err) - require.NotNil(t, res, "height: %d", lastExistingHeight-1) - - if tc.ssCfg.pruningOpts.GetPruningStrategy() == pruningtypes.PruningNothing || tc.ssCfg.pruningOpts.GetPruningStrategy() == pruningtypes.PruningDefault { - // With prune nothing or default, we query height 0 which translates to the latest height. - require.NotNil(t, res.Value, "height: %d", lastExistingHeight-1) - } - }) - } -} - -func TestABCI_LoadSnapshotChunk(t *testing.T) { - ssCfg := SnapshotsConfig{ - blocks: 2, - blockTxs: 5, - snapshotInterval: 2, - snapshotKeepRecent: snapshottypes.CurrentFormat, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - } - suite := NewBaseAppSuiteWithSnapshots(t, ssCfg) - - testCases := map[string]struct { - height uint64 - format uint32 - chunk uint32 - expectEmpty bool - }{ - "Existing snapshot": {2, snapshottypes.CurrentFormat, 1, false}, - "Missing height": {100, snapshottypes.CurrentFormat, 1, true}, - "Missing format": {2, snapshottypes.CurrentFormat + 1, 1, true}, - "Missing chunk": {2, snapshottypes.CurrentFormat, 9, true}, - "Zero height": {0, snapshottypes.CurrentFormat, 1, true}, - "Zero format": {2, 0, 1, true}, - "Zero chunk": {2, snapshottypes.CurrentFormat, 0, false}, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - resp, _ := suite.baseApp.LoadSnapshotChunk(&abci.RequestLoadSnapshotChunk{ - Height: tc.height, - Format: tc.format, - Chunk: tc.chunk, - }) - if tc.expectEmpty { - require.Equal(t, &abci.ResponseLoadSnapshotChunk{}, resp) - return - } - - require.NotEmpty(t, resp.Chunk) - }) - } -} - -func TestABCI_OfferSnapshot_Errors(t *testing.T) { - ssCfg := SnapshotsConfig{ - blocks: 0, - blockTxs: 0, - snapshotInterval: 2, - snapshotKeepRecent: 2, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - } - suite := NewBaseAppSuiteWithSnapshots(t, ssCfg) - - m := snapshottypes.Metadata{ChunkHashes: [][]byte{{1}, {2}, {3}}} - metadata, err := m.Marshal() - require.NoError(t, err) - - hash := []byte{1, 2, 3} - - testCases := map[string]struct { - snapshot *abci.Snapshot - result abci.ResponseOfferSnapshot_Result - }{ - "nil snapshot": {nil, abci.ResponseOfferSnapshot_REJECT}, - "invalid 
format": {&abci.Snapshot{ - Height: 1, Format: 9, Chunks: 3, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT_FORMAT}, - "incorrect chunk count": {&abci.Snapshot{ - Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 2, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT}, - "no chunks": {&abci.Snapshot{ - Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 0, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT}, - "invalid metadata serialization": {&abci.Snapshot{ - Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 0, Hash: hash, Metadata: []byte{3, 1, 4}, - }, abci.ResponseOfferSnapshot_REJECT}, - } - for name, tc := range testCases { - tc := tc - t.Run(name, func(t *testing.T) { - resp, err := suite.baseApp.OfferSnapshot(&abci.RequestOfferSnapshot{Snapshot: tc.snapshot}) - require.NoError(t, err) - require.Equal(t, tc.result, resp.Result) - }) - } - - // Offering a snapshot after one has been accepted should error - resp, err := suite.baseApp.OfferSnapshot(&abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ - Height: 1, - Format: snapshottypes.CurrentFormat, - Chunks: 3, - Hash: []byte{1, 2, 3}, - Metadata: metadata, - }}) - require.NoError(t, err) - require.Equal(t, &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, resp) - - resp, err = suite.baseApp.OfferSnapshot(&abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ - Height: 2, - Format: snapshottypes.CurrentFormat, - Chunks: 3, - Hash: []byte{1, 2, 3}, - Metadata: metadata, - }}) - require.NoError(t, err) - require.Equal(t, &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, resp) -} - -func TestABCI_ApplySnapshotChunk(t *testing.T) { - srcCfg := SnapshotsConfig{ - blocks: 4, - blockTxs: 10, - snapshotInterval: 2, - snapshotKeepRecent: 2, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - } - srcSuite := NewBaseAppSuiteWithSnapshots(t, srcCfg) - - targetCfg := SnapshotsConfig{ - blocks: 0, - blockTxs: 0, - snapshotInterval: 2, - snapshotKeepRecent: 2, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - } - targetSuite := NewBaseAppSuiteWithSnapshots(t, targetCfg) - - // fetch latest snapshot to restore - respList, err := srcSuite.baseApp.ListSnapshots(&abci.RequestListSnapshots{}) - require.NoError(t, err) - require.NotEmpty(t, respList.Snapshots) - snapshot := respList.Snapshots[0] - - // make sure the snapshot has at least 3 chunks - require.GreaterOrEqual(t, snapshot.Chunks, uint32(3), "Not enough snapshot chunks") - - // begin a snapshot restoration in the target - respOffer, err := targetSuite.baseApp.OfferSnapshot(&abci.RequestOfferSnapshot{Snapshot: snapshot}) - require.NoError(t, err) - require.Equal(t, &abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, respOffer) - - // We should be able to pass an invalid chunk and get a verify failure, before - // reapplying it. 
- respApply, err := targetSuite.baseApp.ApplySnapshotChunk(&abci.RequestApplySnapshotChunk{ - Index: 0, - Chunk: []byte{9}, - Sender: "sender", - }) - require.NoError(t, err) - require.Equal(t, &abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_RETRY, - RefetchChunks: []uint32{0}, - RejectSenders: []string{"sender"}, - }, respApply) - - // fetch each chunk from the source and apply it to the target - for index := uint32(0); index < snapshot.Chunks; index++ { - respChunk, err := srcSuite.baseApp.LoadSnapshotChunk(&abci.RequestLoadSnapshotChunk{ - Height: snapshot.Height, - Format: snapshot.Format, - Chunk: index, - }) - require.NoError(t, err) - require.NotNil(t, respChunk.Chunk) - - respApply, err := targetSuite.baseApp.ApplySnapshotChunk(&abci.RequestApplySnapshotChunk{ - Index: index, - Chunk: respChunk.Chunk, - }) - require.NoError(t, err) - require.Equal(t, &abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_ACCEPT, - }, respApply) - } - - // the target should now have the same hash as the source - require.Equal(t, srcSuite.baseApp.LastCommitID(), targetSuite.baseApp.LastCommitID()) -} diff --git a/baseapp/state.go b/baseapp/state.go deleted file mode 100644 index ddfb82f92d..0000000000 --- a/baseapp/state.go +++ /dev/null @@ -1,23 +0,0 @@ -package baseapp - -import ( - storetypes "cosmossdk.io/store/types" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type state struct { - ms storetypes.CacheMultiStore - ctx sdk.Context -} - -// CacheMultiStore calls and returns a CacheMultiStore on the state's underling -// CacheMultiStore. -func (st *state) CacheMultiStore() storetypes.CacheMultiStore { - return st.ms.CacheMultiStore() -} - -// Context returns the Context of the state. -func (st *state) Context() sdk.Context { - return st.ctx -} diff --git a/baseapp/streaming.go b/baseapp/streaming.go deleted file mode 100644 index c978d959aa..0000000000 --- a/baseapp/streaming.go +++ /dev/null @@ -1,112 +0,0 @@ -package baseapp - -import ( - "fmt" - "sort" - "strings" - - "github.com/spf13/cast" - - "cosmossdk.io/store/streaming" - storetypes "cosmossdk.io/store/types" - - "github.com/cosmos/cosmos-sdk/client/flags" - servertypes "github.com/cosmos/cosmos-sdk/server/types" -) - -const ( - StreamingTomlKey = "streaming" - StreamingABCITomlKey = "abci" - StreamingABCIPluginTomlKey = "plugin" - StreamingABCIKeysTomlKey = "keys" - StreamingABCIStopNodeOnErrTomlKey = "stop-node-on-err" -) - -// RegisterStreamingServices registers streaming services with the BaseApp. -func (app *BaseApp) RegisterStreamingServices(appOpts servertypes.AppOptions, keys map[string]*storetypes.KVStoreKey) error { - // register streaming services - streamingCfg := cast.ToStringMap(appOpts.Get(StreamingTomlKey)) - for service := range streamingCfg { - pluginKey := fmt.Sprintf("%s.%s.%s", StreamingTomlKey, service, StreamingABCIPluginTomlKey) - pluginName := strings.TrimSpace(cast.ToString(appOpts.Get(pluginKey))) - if len(pluginName) > 0 { - logLevel := cast.ToString(appOpts.Get(flags.FlagLogLevel)) - plugin, err := streaming.NewStreamingPlugin(pluginName, logLevel) - if err != nil { - return fmt.Errorf("failed to load streaming plugin: %w", err) - } - if err := app.registerStreamingPlugin(appOpts, keys, plugin); err != nil { - return fmt.Errorf("failed to register streaming plugin %w", err) - } - } - } - - return nil -} - -// registerStreamingPlugin registers streaming plugins with the BaseApp. 
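RegisterStreamingServices, deleted above, discovers listeners purely from nested config keys: it enumerates the sub-tables under `streaming` and reads `streaming.<service>.plugin` to decide whether a plugin should be loaded at all. A self-contained sketch of that key-composition pattern over a plain map; the `opts` type here is a hypothetical stand-in for servertypes.AppOptions, and only the key names come from the removed constants:

```go
package main

import (
	"fmt"
	"strings"
)

// opts stands in for servertypes.AppOptions: a flat lookup of dotted keys.
type opts map[string]any

func (o opts) Get(key string) any { return o[key] }

func main() {
	cfg := opts{
		"streaming.abci.plugin":           "abci_v1",
		"streaming.abci.keys":             []string{"*"},
		"streaming.abci.stop-node-on-err": true,
	}

	// Mirror of the key composition in the removed RegisterStreamingServices:
	// streaming.<service>.plugin selects the plugin to load for that service.
	const service = "abci"
	pluginKey := fmt.Sprintf("%s.%s.%s", "streaming", service, "plugin")
	pluginName := strings.TrimSpace(fmt.Sprint(cfg.Get(pluginKey)))
	if pluginName != "" {
		fmt.Printf("would load streaming plugin %q for service %q\n", pluginName, service)
	}
}
```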
-func (app *BaseApp) registerStreamingPlugin( - appOpts servertypes.AppOptions, - keys map[string]*storetypes.KVStoreKey, - streamingPlugin interface{}, -) error { - v, ok := streamingPlugin.(storetypes.ABCIListener) - if !ok { - return fmt.Errorf("unexpected plugin type %T", v) - } - - app.registerABCIListenerPlugin(appOpts, keys, v) - return nil -} - -// registerABCIListenerPlugin registers plugins that implement the ABCIListener interface. -func (app *BaseApp) registerABCIListenerPlugin( - appOpts servertypes.AppOptions, - keys map[string]*storetypes.KVStoreKey, - abciListener storetypes.ABCIListener, -) { - stopNodeOnErrKey := fmt.Sprintf("%s.%s.%s", StreamingTomlKey, StreamingABCITomlKey, StreamingABCIStopNodeOnErrTomlKey) - stopNodeOnErr := cast.ToBool(appOpts.Get(stopNodeOnErrKey)) - keysKey := fmt.Sprintf("%s.%s.%s", StreamingTomlKey, StreamingABCITomlKey, StreamingABCIKeysTomlKey) - exposeKeysStr := cast.ToStringSlice(appOpts.Get(keysKey)) - exposedKeys := exposeStoreKeysSorted(exposeKeysStr, keys) - app.cms.AddListeners(exposedKeys) - app.SetStreamingManager( - storetypes.StreamingManager{ - ABCIListeners: []storetypes.ABCIListener{abciListener}, - StopNodeOnErr: stopNodeOnErr, - }, - ) -} - -func exposeAll(list []string) bool { - for _, ele := range list { - if ele == "*" { - return true - } - } - return false -} - -func exposeStoreKeysSorted(keysStr []string, keys map[string]*storetypes.KVStoreKey) []storetypes.StoreKey { - var exposeStoreKeys []storetypes.StoreKey - if exposeAll(keysStr) { - exposeStoreKeys = make([]storetypes.StoreKey, 0, len(keys)) - for key := range keys { - exposeStoreKeys = append(exposeStoreKeys, keys[key]) - } - } else { - exposeStoreKeys = make([]storetypes.StoreKey, 0, len(keysStr)) - for _, keyStr := range keysStr { - if storeKey, ok := keys[keyStr]; ok { - exposeStoreKeys = append(exposeStoreKeys, storeKey) - } - } - } - // sort storeKeys for deterministic output - sort.SliceStable(exposeStoreKeys, func(i, j int) bool { - return exposeStoreKeys[i].Name() < exposeStoreKeys[j].Name() - }) - - return exposeStoreKeys -} diff --git a/baseapp/streaming_test.go b/baseapp/streaming_test.go deleted file mode 100644 index 68ab1320ef..0000000000 --- a/baseapp/streaming_test.go +++ /dev/null @@ -1,146 +0,0 @@ -package baseapp_test - -import ( - "context" - "fmt" - "testing" - - abci "github.com/cometbft/cometbft/abci/types" - tmproto "github.com/cometbft/cometbft/proto/tendermint/types" - "github.com/stretchr/testify/require" - - storetypes "cosmossdk.io/store/types" - - "github.com/cosmos/cosmos-sdk/baseapp" - baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" -) - -var _ storetypes.ABCIListener = (*MockABCIListener)(nil) - -type MockABCIListener struct { - name string - ChangeSet []*storetypes.StoreKVPair -} - -func NewMockABCIListener(name string) MockABCIListener { - return MockABCIListener{ - name: name, - ChangeSet: make([]*storetypes.StoreKVPair, 0), - } -} - -func (m MockABCIListener) ListenFinalizeBlock(_ context.Context, _ abci.RequestFinalizeBlock, _ abci.ResponseFinalizeBlock) error { - return nil -} - -func (m *MockABCIListener) ListenCommit(_ context.Context, _ abci.ResponseCommit, cs []*storetypes.StoreKVPair) error { - m.ChangeSet = cs - return nil -} - -var distKey1 = storetypes.NewKVStoreKey("distKey1") - -func TestABCI_MultiListener_StateChanges(t *testing.T) { - anteKey := []byte("ante-key") - anteOpt := func(bapp *baseapp.BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } - distOpt := func(bapp 
*baseapp.BaseApp) { bapp.MountStores(distKey1) } - mockListener1 := NewMockABCIListener("lis_1") - mockListener2 := NewMockABCIListener("lis_2") - streamingManager := storetypes.StreamingManager{ABCIListeners: []storetypes.ABCIListener{&mockListener1, &mockListener2}} - streamingManagerOpt := func(bapp *baseapp.BaseApp) { bapp.SetStreamingManager(streamingManager) } - addListenerOpt := func(bapp *baseapp.BaseApp) { bapp.CommitMultiStore().AddListeners([]storetypes.StoreKey{distKey1}) } - suite := NewBaseAppSuite(t, anteOpt, distOpt, streamingManagerOpt, addListenerOpt) - - suite.baseApp.InitChain( - &abci.RequestInitChain{ - ConsensusParams: &tmproto.ConsensusParams{}, - }, - ) - - deliverKey := []byte("deliver-key") - baseapptestutil.RegisterCounterServer(suite.baseApp.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey}) - - nBlocks := 3 - txPerHeight := 5 - - for blockN := 0; blockN < nBlocks; blockN++ { - txs := [][]byte{} - - var expectedChangeSet []*storetypes.StoreKVPair - - // create final block context state - _, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: int64(blockN) + 1, Txs: txs}) - require.NoError(t, err) - - for i := 0; i < txPerHeight; i++ { - counter := int64(blockN*txPerHeight + i) - tx := newTxCounter(t, suite.txConfig, counter, counter) - - txBytes, err := suite.txConfig.TxEncoder()(tx) - require.NoError(t, err) - - sKey := []byte(fmt.Sprintf("distKey%d", i)) - sVal := []byte(fmt.Sprintf("distVal%d", i)) - store := getFinalizeBlockStateCtx(suite.baseApp).KVStore(distKey1) - store.Set(sKey, sVal) - - expectedChangeSet = append(expectedChangeSet, &storetypes.StoreKVPair{ - StoreKey: distKey1.Name(), - Delete: false, - Key: sKey, - Value: sVal, - }) - - txs = append(txs, txBytes) - } - - res, err := suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: int64(blockN) + 1, Txs: txs}) - require.NoError(t, err) - for _, tx := range res.TxResults { - events := tx.GetEvents() - require.Len(t, events, 3, "should contain ante handler, message type and counter events respectively") - // require.Equal(t, sdk.MarkEventsToIndex(counterEvent("ante_handler", counter).ToABCIEvents(), map[string]struct{}{})[0], events[0], "ante handler event") - // require.Equal(t, sdk.MarkEventsToIndex(counterEvent(sdk.EventTypeMessage, counter).ToABCIEvents(), map[string]struct{}{})[0], events[2], "msg handler update counter event") - } - - suite.baseApp.Commit() - - require.Equal(t, expectedChangeSet, mockListener1.ChangeSet, "should contain the same changeSet") - require.Equal(t, expectedChangeSet, mockListener2.ChangeSet, "should contain the same changeSet") - } -} - -func Test_Ctx_with_StreamingManager(t *testing.T) { - mockListener1 := NewMockABCIListener("lis_1") - mockListener2 := NewMockABCIListener("lis_2") - listeners := []storetypes.ABCIListener{&mockListener1, &mockListener2} - streamingManager := storetypes.StreamingManager{ABCIListeners: listeners, StopNodeOnErr: true} - streamingManagerOpt := func(bapp *baseapp.BaseApp) { bapp.SetStreamingManager(streamingManager) } - addListenerOpt := func(bapp *baseapp.BaseApp) { bapp.CommitMultiStore().AddListeners([]storetypes.StoreKey{distKey1}) } - suite := NewBaseAppSuite(t, streamingManagerOpt, addListenerOpt) - - suite.baseApp.InitChain(&abci.RequestInitChain{ - ConsensusParams: &tmproto.ConsensusParams{}, - }) - - ctx := getFinalizeBlockStateCtx(suite.baseApp) - sm := ctx.StreamingManager() - require.NotNil(t, sm, fmt.Sprintf("nil StreamingManager: %v", sm)) - require.Equal(t, listeners, 
sm.ABCIListeners, fmt.Sprintf("should contain same listeners: %v", listeners)) - require.Equal(t, true, sm.StopNodeOnErr, "should contain StopNodeOnErr = true") - - nBlocks := 2 - - for blockN := 0; blockN < nBlocks; blockN++ { - - suite.baseApp.FinalizeBlock(&abci.RequestFinalizeBlock{Height: int64(blockN) + 1}) - - ctx := getFinalizeBlockStateCtx(suite.baseApp) - sm := ctx.StreamingManager() - require.NotNil(t, sm, fmt.Sprintf("nil StreamingManager: %v", sm)) - require.Equal(t, listeners, sm.ABCIListeners, fmt.Sprintf("should contain same listeners: %v", listeners)) - require.Equal(t, true, sm.StopNodeOnErr, "should contain StopNodeOnErr = true") - - suite.baseApp.Commit() - } -} diff --git a/baseapp/test_helpers.go b/baseapp/test_helpers.go deleted file mode 100644 index db603f2f29..0000000000 --- a/baseapp/test_helpers.go +++ /dev/null @@ -1,78 +0,0 @@ -package baseapp - -import ( - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - - errorsmod "cosmossdk.io/errors" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -// SimCheck defines a CheckTx helper function that used in tests and simulations. -func (app *BaseApp) SimCheck(txEncoder sdk.TxEncoder, tx sdk.Tx) (sdk.GasInfo, *sdk.Result, error) { - // runTx expects tx bytes as argument, so we encode the tx argument into - // bytes. Note that runTx will actually decode those bytes again. But since - // this helper is only used in tests/simulation, it's fine. - bz, err := txEncoder(tx) - if err != nil { - return sdk.GasInfo{}, nil, errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "%s", err) - } - - gasInfo, result, _, err := app.runTx(execModeCheck, bz) - return gasInfo, result, err -} - -// Simulate executes a tx in simulate mode to get result and gas info. -func (app *BaseApp) Simulate(txBytes []byte) (sdk.GasInfo, *sdk.Result, error) { - gasInfo, result, _, err := app.runTx(execModeSimulate, txBytes) - return gasInfo, result, err -} - -func (app *BaseApp) SimDeliver(txEncoder sdk.TxEncoder, tx sdk.Tx) (sdk.GasInfo, *sdk.Result, error) { - // See comment for Check(). - bz, err := txEncoder(tx) - if err != nil { - return sdk.GasInfo{}, nil, errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "%s", err) - } - gasInfo, result, _, err := app.runTx(execModeFinalize, bz) - return gasInfo, result, err -} - -func (app *BaseApp) SimTxFinalizeBlock(txEncoder sdk.TxEncoder, tx sdk.Tx) (sdk.GasInfo, *sdk.Result, error) { - // See comment for Check(). - bz, err := txEncoder(tx) - if err != nil { - return sdk.GasInfo{}, nil, errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "%s", err) - } - - gasInfo, result, _, err := app.runTx(execModeFinalize, bz) - return gasInfo, result, err -} - -// NewContextLegacy returns a new sdk.Context with the provided header -func (app *BaseApp) NewContextLegacy(isCheckTx bool, header cmtproto.Header) sdk.Context { - if isCheckTx { - return sdk.NewContext(app.checkState.ms, header, true, app.logger). 
- WithMinGasPrices(app.minGasPrices) - } - - return sdk.NewContext(app.finalizeBlockState.ms, header, false, app.logger) -} - -// NewContext returns a new sdk.Context with a empty header -func (app *BaseApp) NewContext(isCheckTx bool) sdk.Context { - return app.NewContextLegacy(isCheckTx, cmtproto.Header{}) -} - -func (app *BaseApp) NewUncachedContext(isCheckTx bool, header cmtproto.Header) sdk.Context { - return sdk.NewContext(app.cms, header, isCheckTx, app.logger) -} - -func (app *BaseApp) GetContextForFinalizeBlock(txBytes []byte) sdk.Context { - return app.getContextForTx(execModeFinalize, txBytes) -} - -func (app *BaseApp) GetContextForCheckTx(txBytes []byte) sdk.Context { - return app.getContextForTx(execModeCheck, txBytes) -} diff --git a/baseapp/testutil/buf.gen.yaml b/baseapp/testutil/buf.gen.yaml deleted file mode 100644 index d7d17bbb26..0000000000 --- a/baseapp/testutil/buf.gen.yaml +++ /dev/null @@ -1,5 +0,0 @@ -version: v1 -plugins: - - name: gocosmos - out: ../.. - opt: plugins=grpc,Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types diff --git a/baseapp/testutil/buf.lock b/baseapp/testutil/buf.lock deleted file mode 100644 index 89f0d19044..0000000000 --- a/baseapp/testutil/buf.lock +++ /dev/null @@ -1,23 +0,0 @@ -# Generated by buf. DO NOT EDIT. -version: v1 -deps: - - remote: buf.build - owner: cosmos - repository: cosmos-proto - commit: 1935555c206d4afb9e94615dfd0fad31 - digest: shake256:c74d91a3ac7ae07d579e90eee33abf9b29664047ac8816500cf22c081fec0d72d62c89ce0bebafc1f6fec7aa5315be72606717740ca95007248425102c365377 - - remote: buf.build - owner: cosmos - repository: cosmos-sdk - commit: 9d547dbea90f47afbe1898388fcebffb - digest: shake256:63237398fb2043153c81bbe91ce52a832bca02d4307334b62fcc9914ce6f12fea59388eb5102949255054973f7022f581e02f97ed1f69a6585d2d00fb1da5833 - - remote: buf.build - owner: cosmos - repository: gogo-proto - commit: 5e5b9fdd01804356895f8f79a6f1ddc1 - digest: shake256:0b85da49e2e5f9ebc4806eae058e2f56096ff3b1c59d1fb7c190413dd15f45dd456f0b69ced9059341c80795d2b6c943de15b120a9e0308b499e43e4b5fc2952 - - remote: buf.build - owner: googleapis - repository: googleapis - commit: cc916c31859748a68fd229a3c8d7a2e8 - digest: shake256:469b049d0eb04203d5272062636c078decefc96fec69739159c25d85349c50c34c7706918a8b216c5c27f76939df48452148cff8c5c3ae77fa6ba5c25c1b8bf8 diff --git a/baseapp/testutil/buf.yaml b/baseapp/testutil/buf.yaml deleted file mode 100644 index b0edfb59a9..0000000000 --- a/baseapp/testutil/buf.yaml +++ /dev/null @@ -1,5 +0,0 @@ -version: v1 -deps: - - buf.build/cosmos/cosmos-sdk - - buf.build/cosmos/gogo-proto - - buf.build/cosmos/cosmos-proto diff --git a/baseapp/testutil/messages.go b/baseapp/testutil/messages.go deleted file mode 100644 index a4b1cd9abb..0000000000 --- a/baseapp/testutil/messages.go +++ /dev/null @@ -1,65 +0,0 @@ -package testutil - -import ( - errorsmod "cosmossdk.io/errors" - - "github.com/cosmos/cosmos-sdk/codec/types" - "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -func RegisterInterfaces(registry types.InterfaceRegistry) { - registry.RegisterImplementations( - (*sdk.Msg)(nil), - &MsgCounter{}, - &MsgCounter2{}, - &MsgKeyValue{}, - ) - msgservice.RegisterMsgServiceDesc(registry, &_Counter_serviceDesc) - msgservice.RegisterMsgServiceDesc(registry, &_Counter2_serviceDesc) - msgservice.RegisterMsgServiceDesc(registry, &_KeyValue_serviceDesc) - - 
codec.RegisterInterfaces(registry) -} - -var _ sdk.Msg = &MsgCounter{} - -func (msg *MsgCounter) GetSigners() []sdk.AccAddress { return []sdk.AccAddress{} } -func (msg *MsgCounter) ValidateBasic() error { - if msg.Counter >= 0 { - return nil - } - return errorsmod.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") -} - -var _ sdk.Msg = &MsgCounter2{} - -func (msg *MsgCounter2) GetSigners() []sdk.AccAddress { return []sdk.AccAddress{} } -func (msg *MsgCounter2) ValidateBasic() error { - if msg.Counter >= 0 { - return nil - } - return errorsmod.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") -} - -var _ sdk.Msg = &MsgKeyValue{} - -func (msg *MsgKeyValue) GetSigners() []sdk.AccAddress { - if len(msg.Signer) == 0 { - return []sdk.AccAddress{} - } - - return []sdk.AccAddress{sdk.MustAccAddressFromBech32(msg.Signer)} -} - -func (msg *MsgKeyValue) ValidateBasic() error { - if msg.Key == nil { - return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "key cannot be nil") - } - if msg.Value == nil { - return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "value cannot be nil") - } - return nil -} diff --git a/baseapp/testutil/messages.pb.go b/baseapp/testutil/messages.pb.go deleted file mode 100644 index 2884ff0f64..0000000000 --- a/baseapp/testutil/messages.pb.go +++ /dev/null @@ -1,1397 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: messages.proto - -package testutil - -import ( - context "context" - fmt "fmt" - _ "github.com/cosmos/cosmos-sdk/codec/types" - _ "github.com/cosmos/cosmos-sdk/types/msgservice" - _ "github.com/cosmos/gogoproto/gogoproto" - grpc1 "github.com/cosmos/gogoproto/grpc" - proto "github.com/cosmos/gogoproto/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type MsgCounter struct { - Counter int64 `protobuf:"varint,1,opt,name=counter,proto3" json:"counter,omitempty"` - FailOnHandler bool `protobuf:"varint,2,opt,name=fail_on_handler,json=failOnHandler,proto3" json:"fail_on_handler,omitempty"` - Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` -} - -func (m *MsgCounter) Reset() { *m = MsgCounter{} } -func (m *MsgCounter) String() string { return proto.CompactTextString(m) } -func (*MsgCounter) ProtoMessage() {} -func (*MsgCounter) Descriptor() ([]byte, []int) { - return fileDescriptor_4dc296cbfe5ffcd5, []int{0} -} -func (m *MsgCounter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCounter.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCounter) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCounter.Merge(m, src) -} -func (m *MsgCounter) XXX_Size() int { - return m.Size() -} -func (m *MsgCounter) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCounter.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCounter proto.InternalMessageInfo - -func (m *MsgCounter) GetCounter() int64 { - if m != nil { - return m.Counter - } - return 0 -} - -func (m *MsgCounter) GetFailOnHandler() bool { - if m != nil { - return m.FailOnHandler - } - return false -} - -func (m *MsgCounter) GetSigner() string { - if m != nil { - return m.Signer - } - return "" -} - -type MsgCounter2 struct { - Counter int64 `protobuf:"varint,1,opt,name=counter,proto3" json:"counter,omitempty"` - FailOnHandler bool `protobuf:"varint,2,opt,name=fail_on_handler,json=failOnHandler,proto3" json:"fail_on_handler,omitempty"` - Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` -} - -func (m *MsgCounter2) Reset() { *m = MsgCounter2{} } -func (m *MsgCounter2) String() string { return proto.CompactTextString(m) } -func (*MsgCounter2) ProtoMessage() {} -func (*MsgCounter2) Descriptor() ([]byte, []int) { - return fileDescriptor_4dc296cbfe5ffcd5, []int{1} -} -func (m *MsgCounter2) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCounter2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCounter2.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCounter2) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCounter2.Merge(m, src) -} -func (m *MsgCounter2) XXX_Size() int { - return m.Size() -} -func (m *MsgCounter2) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCounter2.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCounter2 proto.InternalMessageInfo - -func (m *MsgCounter2) GetCounter() int64 { - if m != nil { - return m.Counter - } - return 0 -} - -func (m *MsgCounter2) GetFailOnHandler() bool { - if m != nil { - return m.FailOnHandler - } - return false -} - -func (m *MsgCounter2) GetSigner() string { - if m != nil { - return m.Signer - } - return "" -} - -type MsgCreateCounterResponse struct { -} - -func (m *MsgCreateCounterResponse) Reset() { *m = MsgCreateCounterResponse{} } -func (m *MsgCreateCounterResponse) String() string { return proto.CompactTextString(m) } -func 
(*MsgCreateCounterResponse) ProtoMessage() {} -func (*MsgCreateCounterResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_4dc296cbfe5ffcd5, []int{2} -} -func (m *MsgCreateCounterResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateCounterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateCounterResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateCounterResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateCounterResponse.Merge(m, src) -} -func (m *MsgCreateCounterResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateCounterResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateCounterResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateCounterResponse proto.InternalMessageInfo - -type MsgKeyValue struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` -} - -func (m *MsgKeyValue) Reset() { *m = MsgKeyValue{} } -func (m *MsgKeyValue) String() string { return proto.CompactTextString(m) } -func (*MsgKeyValue) ProtoMessage() {} -func (*MsgKeyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_4dc296cbfe5ffcd5, []int{3} -} -func (m *MsgKeyValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgKeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgKeyValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgKeyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgKeyValue.Merge(m, src) -} -func (m *MsgKeyValue) XXX_Size() int { - return m.Size() -} -func (m *MsgKeyValue) XXX_DiscardUnknown() { - xxx_messageInfo_MsgKeyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgKeyValue proto.InternalMessageInfo - -func (m *MsgKeyValue) GetKey() []byte { - if m != nil { - return m.Key - } - return nil -} - -func (m *MsgKeyValue) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func (m *MsgKeyValue) GetSigner() string { - if m != nil { - return m.Signer - } - return "" -} - -type MsgCreateKeyValueResponse struct { -} - -func (m *MsgCreateKeyValueResponse) Reset() { *m = MsgCreateKeyValueResponse{} } -func (m *MsgCreateKeyValueResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateKeyValueResponse) ProtoMessage() {} -func (*MsgCreateKeyValueResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_4dc296cbfe5ffcd5, []int{4} -} -func (m *MsgCreateKeyValueResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateKeyValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateKeyValueResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateKeyValueResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateKeyValueResponse.Merge(m, src) -} -func (m *MsgCreateKeyValueResponse) XXX_Size() int { - return m.Size() 
-} -func (m *MsgCreateKeyValueResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateKeyValueResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateKeyValueResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*MsgCounter)(nil), "MsgCounter") - proto.RegisterType((*MsgCounter2)(nil), "MsgCounter2") - proto.RegisterType((*MsgCreateCounterResponse)(nil), "MsgCreateCounterResponse") - proto.RegisterType((*MsgKeyValue)(nil), "MsgKeyValue") - proto.RegisterType((*MsgCreateKeyValueResponse)(nil), "MsgCreateKeyValueResponse") -} - -func init() { proto.RegisterFile("messages.proto", fileDescriptor_4dc296cbfe5ffcd5) } - -var fileDescriptor_4dc296cbfe5ffcd5 = []byte{ - // 390 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x92, 0xcf, 0xaa, 0xd3, 0x40, - 0x14, 0xc6, 0x1b, 0x83, 0x6d, 0x3d, 0xad, 0x5a, 0x42, 0xd1, 0x34, 0x42, 0x28, 0x5d, 0x48, 0x11, - 0x9a, 0xc1, 0xb8, 0x6b, 0x77, 0x8a, 0x54, 0x11, 0x11, 0x22, 0xb8, 0xe8, 0xa6, 0x4c, 0xd2, 0xd3, - 0x69, 0x68, 0x32, 0x13, 0x32, 0x93, 0x42, 0xb7, 0x3e, 0x81, 0x8f, 0xe2, 0x63, 0xb8, 0xec, 0xd2, - 0xa5, 0xb4, 0x0b, 0x5f, 0x43, 0xf2, 0xaf, 0x75, 0x71, 0x7b, 0xb9, 0xab, 0xbb, 0x9a, 0xf3, 0x7d, - 0x87, 0x9c, 0xdf, 0xc9, 0xc7, 0x81, 0x27, 0x31, 0x4a, 0x49, 0x19, 0x4a, 0x27, 0x49, 0x85, 0x12, - 0x56, 0x9f, 0x09, 0x26, 0x8a, 0x92, 0xe4, 0x55, 0xe5, 0x0e, 0x98, 0x10, 0x2c, 0x42, 0x52, 0x28, - 0x3f, 0x5b, 0x13, 0xca, 0xf7, 0x55, 0xeb, 0x79, 0x20, 0x64, 0x2c, 0x24, 0x89, 0x25, 0x23, 0xbb, - 0xd7, 0xf9, 0x53, 0x36, 0x46, 0x12, 0xe0, 0xb3, 0x64, 0xef, 0x44, 0xc6, 0x15, 0xa6, 0x86, 0x09, - 0xad, 0xa0, 0x2c, 0x4d, 0x6d, 0xa8, 0x8d, 0x75, 0xaf, 0x96, 0xc6, 0x4b, 0x78, 0xba, 0xa6, 0x61, - 0xb4, 0x14, 0x7c, 0xb9, 0xa1, 0x7c, 0x15, 0x61, 0x6a, 0x3e, 0x18, 0x6a, 0xe3, 0xb6, 0xf7, 0x38, - 0xb7, 0xbf, 0xf0, 0x0f, 0xa5, 0x69, 0x3c, 0x83, 0xa6, 0x0c, 0x19, 0xc7, 0xd4, 0xd4, 0x87, 0xda, - 0xf8, 0x91, 0x57, 0xa9, 0x69, 0xe7, 0xfb, 0xdf, 0x9f, 0xaf, 0x2a, 0x31, 0x52, 0xd0, 0xb9, 0x40, - 0xdd, 0xfb, 0xa2, 0x5a, 0x60, 0xe6, 0xd4, 0x14, 0xa9, 0xc2, 0x8a, 0xed, 0xa1, 0x4c, 0x04, 0x97, - 0x38, 0x5a, 0x14, 0x1b, 0x7d, 0xc2, 0xfd, 0x37, 0x1a, 0x65, 0x68, 0xf4, 0x40, 0xdf, 0xe2, 0xbe, - 0xd8, 0xa6, 0xeb, 0xe5, 0xa5, 0xd1, 0x87, 0x87, 0xbb, 0xbc, 0x55, 0xf0, 0xbb, 0x5e, 0x29, 0xee, - 0xc6, 0x7d, 0x01, 0x83, 0x33, 0xb7, 0x26, 0xd4, 0x60, 0xf7, 0x3d, 0xb4, 0xea, 0xf0, 0xa7, 0xd0, - 0xfb, 0xc8, 0x83, 0x14, 0x63, 0xe4, 0xaa, 0xf6, 0x3a, 0xce, 0x25, 0x28, 0x6b, 0xe0, 0x5c, 0xdb, - 0xdf, 0x9d, 0x43, 0xfb, 0x1c, 0xe7, 0xec, 0x86, 0x39, 0xdd, 0xff, 0xe6, 0xb8, 0xb7, 0x0d, 0x9a, - 0x41, 0xfb, 0x9c, 0x02, 0x01, 0xfd, 0x2b, 0xaa, 0xf2, 0xdb, 0xda, 0xb4, 0x2c, 0xe7, 0xea, 0xcf, - 0xbc, 0x9d, 0xff, 0x3a, 0xda, 0xda, 0xe1, 0x68, 0x6b, 0x7f, 0x8e, 0xb6, 0xf6, 0xe3, 0x64, 0x37, - 0x0e, 0x27, 0xbb, 0xf1, 0xfb, 0x64, 0x37, 0x16, 0x13, 0x16, 0xaa, 0x4d, 0xe6, 0x3b, 0x81, 0x88, - 0x49, 0x75, 0x8a, 0xe5, 0x33, 0x91, 0xab, 0x2d, 0xf1, 0xa9, 0x44, 0x9a, 0x24, 0x44, 0xa1, 0x54, - 0x99, 0x0a, 0x23, 0xbf, 0x59, 0x1c, 0xe7, 0x9b, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x44, 0x91, - 0x2d, 0xb3, 0xf8, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// CounterClient is the client API for Counter service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type CounterClient interface { - IncrementCounter(ctx context.Context, in *MsgCounter, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) -} - -type counterClient struct { - cc grpc1.ClientConn -} - -func NewCounterClient(cc grpc1.ClientConn) CounterClient { - return &counterClient{cc} -} - -func (c *counterClient) IncrementCounter(ctx context.Context, in *MsgCounter, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) { - out := new(MsgCreateCounterResponse) - err := c.cc.Invoke(ctx, "/Counter/IncrementCounter", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// CounterServer is the server API for Counter service. -type CounterServer interface { - IncrementCounter(context.Context, *MsgCounter) (*MsgCreateCounterResponse, error) -} - -// UnimplementedCounterServer can be embedded to have forward compatible implementations. -type UnimplementedCounterServer struct { -} - -func (*UnimplementedCounterServer) IncrementCounter(ctx context.Context, req *MsgCounter) (*MsgCreateCounterResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method IncrementCounter not implemented") -} - -func RegisterCounterServer(s grpc1.Server, srv CounterServer) { - s.RegisterService(&_Counter_serviceDesc, srv) -} - -func _Counter_IncrementCounter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCounter) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CounterServer).IncrementCounter(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/Counter/IncrementCounter", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CounterServer).IncrementCounter(ctx, req.(*MsgCounter)) - } - return interceptor(ctx, in, info, handler) -} - -var _Counter_serviceDesc = grpc.ServiceDesc{ - ServiceName: "Counter", - HandlerType: (*CounterServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "IncrementCounter", - Handler: _Counter_IncrementCounter_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "messages.proto", -} - -// Counter2Client is the client API for Counter2 service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type Counter2Client interface { - IncrementCounter(ctx context.Context, in *MsgCounter2, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) -} - -type counter2Client struct { - cc grpc1.ClientConn -} - -func NewCounter2Client(cc grpc1.ClientConn) Counter2Client { - return &counter2Client{cc} -} - -func (c *counter2Client) IncrementCounter(ctx context.Context, in *MsgCounter2, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) { - out := new(MsgCreateCounterResponse) - err := c.cc.Invoke(ctx, "/Counter2/IncrementCounter", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Counter2Server is the server API for Counter2 service. -type Counter2Server interface { - IncrementCounter(context.Context, *MsgCounter2) (*MsgCreateCounterResponse, error) -} - -// UnimplementedCounter2Server can be embedded to have forward compatible implementations. 
-type UnimplementedCounter2Server struct { -} - -func (*UnimplementedCounter2Server) IncrementCounter(ctx context.Context, req *MsgCounter2) (*MsgCreateCounterResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method IncrementCounter not implemented") -} - -func RegisterCounter2Server(s grpc1.Server, srv Counter2Server) { - s.RegisterService(&_Counter2_serviceDesc, srv) -} - -func _Counter2_IncrementCounter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCounter2) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(Counter2Server).IncrementCounter(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/Counter2/IncrementCounter", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(Counter2Server).IncrementCounter(ctx, req.(*MsgCounter2)) - } - return interceptor(ctx, in, info, handler) -} - -var _Counter2_serviceDesc = grpc.ServiceDesc{ - ServiceName: "Counter2", - HandlerType: (*Counter2Server)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "IncrementCounter", - Handler: _Counter2_IncrementCounter_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "messages.proto", -} - -// KeyValueClient is the client API for KeyValue service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type KeyValueClient interface { - Set(ctx context.Context, in *MsgKeyValue, opts ...grpc.CallOption) (*MsgCreateKeyValueResponse, error) -} - -type keyValueClient struct { - cc grpc1.ClientConn -} - -func NewKeyValueClient(cc grpc1.ClientConn) KeyValueClient { - return &keyValueClient{cc} -} - -func (c *keyValueClient) Set(ctx context.Context, in *MsgKeyValue, opts ...grpc.CallOption) (*MsgCreateKeyValueResponse, error) { - out := new(MsgCreateKeyValueResponse) - err := c.cc.Invoke(ctx, "/KeyValue/Set", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// KeyValueServer is the server API for KeyValue service. -type KeyValueServer interface { - Set(context.Context, *MsgKeyValue) (*MsgCreateKeyValueResponse, error) -} - -// UnimplementedKeyValueServer can be embedded to have forward compatible implementations. 
-type UnimplementedKeyValueServer struct { -} - -func (*UnimplementedKeyValueServer) Set(ctx context.Context, req *MsgKeyValue) (*MsgCreateKeyValueResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") -} - -func RegisterKeyValueServer(s grpc1.Server, srv KeyValueServer) { - s.RegisterService(&_KeyValue_serviceDesc, srv) -} - -func _KeyValue_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgKeyValue) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(KeyValueServer).Set(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/KeyValue/Set", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KeyValueServer).Set(ctx, req.(*MsgKeyValue)) - } - return interceptor(ctx, in, info, handler) -} - -var _KeyValue_serviceDesc = grpc.ServiceDesc{ - ServiceName: "KeyValue", - HandlerType: (*KeyValueServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Set", - Handler: _KeyValue_Set_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "messages.proto", -} - -func (m *MsgCounter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCounter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCounter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Signer) > 0 { - i -= len(m.Signer) - copy(dAtA[i:], m.Signer) - i = encodeVarintMessages(dAtA, i, uint64(len(m.Signer))) - i-- - dAtA[i] = 0x1a - } - if m.FailOnHandler { - i-- - if m.FailOnHandler { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.Counter != 0 { - i = encodeVarintMessages(dAtA, i, uint64(m.Counter)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MsgCounter2) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCounter2) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCounter2) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Signer) > 0 { - i -= len(m.Signer) - copy(dAtA[i:], m.Signer) - i = encodeVarintMessages(dAtA, i, uint64(len(m.Signer))) - i-- - dAtA[i] = 0x1a - } - if m.FailOnHandler { - i-- - if m.FailOnHandler { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.Counter != 0 { - i = encodeVarintMessages(dAtA, i, uint64(m.Counter)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateCounterResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateCounterResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateCounterResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - 
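The single-byte constants written by the marshal functions above (0x8, 0x10, 0x1a) are protobuf field tags, computed as field_number<<3 | wire_type, and encodeVarintMessages emits base-128 varints with a continuation bit. A standalone illustration of both encodings, separate from the generated file:

```go
package main

import "fmt"

// tag builds a protobuf field tag byte: the field number shifted left by
// three bits, OR'd with the wire type (0 = varint, 2 = length-delimited).
func tag(fieldNum, wireType int) byte {
	return byte(fieldNum<<3 | wireType)
}

// putUvarint appends a base-128 varint, least-significant group first,
// setting the continuation bit (0x80) on every byte except the last.
func putUvarint(dst []byte, v uint64) []byte {
	for v >= 0x80 {
		dst = append(dst, byte(v)|0x80)
		v >>= 7
	}
	return append(dst, byte(v))
}

func main() {
	fmt.Printf("tag(1,0)=%#x tag(2,0)=%#x tag(3,2)=%#x\n", tag(1, 0), tag(2, 0), tag(3, 2))
	// Encoding a counter value of 300 on field 1: tag 0x08 followed by varint 300.
	fmt.Printf("% x\n", putUvarint([]byte{tag(1, 0)}, 300)) // prints: 08 ac 02
}
```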
-func (m *MsgKeyValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgKeyValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgKeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Signer) > 0 { - i -= len(m.Signer) - copy(dAtA[i:], m.Signer) - i = encodeVarintMessages(dAtA, i, uint64(len(m.Signer))) - i-- - dAtA[i] = 0x1a - } - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintMessages(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintMessages(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateKeyValueResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateKeyValueResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateKeyValueResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintMessages(dAtA []byte, offset int, v uint64) int { - offset -= sovMessages(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MsgCounter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Counter != 0 { - n += 1 + sovMessages(uint64(m.Counter)) - } - if m.FailOnHandler { - n += 2 - } - l = len(m.Signer) - if l > 0 { - n += 1 + l + sovMessages(uint64(l)) - } - return n -} - -func (m *MsgCounter2) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Counter != 0 { - n += 1 + sovMessages(uint64(m.Counter)) - } - if m.FailOnHandler { - n += 2 - } - l = len(m.Signer) - if l > 0 { - n += 1 + l + sovMessages(uint64(l)) - } - return n -} - -func (m *MsgCreateCounterResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgKeyValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovMessages(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovMessages(uint64(l)) - } - l = len(m.Signer) - if l > 0 { - n += 1 + l + sovMessages(uint64(l)) - } - return n -} - -func (m *MsgCreateKeyValueResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovMessages(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMessages(x uint64) (n int) { - return sovMessages(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MsgCounter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: MsgCounter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCounter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) - } - m.Counter = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Counter |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailOnHandler", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.FailOnHandler = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Signer = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMessages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCounter2) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCounter2: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCounter2: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) - } - m.Counter = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Counter |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailOnHandler", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.FailOnHandler = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signer", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Signer = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMessages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateCounterResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateCounterResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateCounterResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipMessages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgKeyValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgKeyValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgKeyValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
- if m.Key == nil { - m.Key = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) - if m.Value == nil { - m.Value = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMessages - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMessages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Signer = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMessages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateKeyValueResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMessages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateKeyValueResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateKeyValueResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipMessages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMessages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMessages(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMessages - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMessages - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMessages - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMessages - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMessages - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMessages - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthMessages = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMessages = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMessages = fmt.Errorf("proto: unexpected end of group") -) diff --git a/baseapp/testutil/messages.proto b/baseapp/testutil/messages.proto deleted file mode 100644 index d2b25d24c1..0000000000 --- a/baseapp/testutil/messages.proto +++ /dev/null @@ -1,47 +0,0 @@ -syntax = "proto3"; - -import "gogoproto/gogo.proto"; -import "google/protobuf/any.proto"; -import "cosmos/msg/v1/msg.proto"; - -option go_package = "github.com/cosmos/cosmos-sdk/baseapp/testutil"; - -message MsgCounter { - option (cosmos.msg.v1.signer) = "signer"; - - int64 counter = 1; - bool fail_on_handler = 2; - string signer = 3; -} - -message MsgCounter2 { - option (cosmos.msg.v1.signer) = "signer"; - - int64 counter = 1; - bool fail_on_handler = 2; - string signer = 3; -} - -message MsgCreateCounterResponse {} - -message MsgKeyValue { - option (cosmos.msg.v1.signer) = "signer"; - - bytes key = 1; - bytes value = 2; - string signer = 3; -} - -message MsgCreateKeyValueResponse {} - -service Counter { - rpc IncrementCounter(MsgCounter) returns (MsgCreateCounterResponse); -} - -service Counter2 { - rpc IncrementCounter(MsgCounter2) returns (MsgCreateCounterResponse); -} - -service KeyValue { - rpc Set(MsgKeyValue) returns (MsgCreateKeyValueResponse); -} \ No newline at end of file diff --git a/baseapp/testutil/mock/mocks.go b/baseapp/testutil/mock/mocks.go deleted file mode 100644 index 85d1cdeaf6..0000000000 --- a/baseapp/testutil/mock/mocks.go +++ /dev/null @@ -1,235 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: baseapp/abci_utils.go - -// Package mock is a generated GoMock package. -package mock - -import ( - context "context" - reflect "reflect" - - crypto "github.com/cometbft/cometbft/proto/tendermint/crypto" - types "github.com/cosmos/cosmos-sdk/types" - gomock "github.com/golang/mock/gomock" -) - -// MockValidatorStore is a mock of ValidatorStore interface. -type MockValidatorStore struct { - ctrl *gomock.Controller - recorder *MockValidatorStoreMockRecorder -} - -// MockValidatorStoreMockRecorder is the mock recorder for MockValidatorStore. -type MockValidatorStoreMockRecorder struct { - mock *MockValidatorStore -} - -// NewMockValidatorStore creates a new mock instance. -func NewMockValidatorStore(ctrl *gomock.Controller) *MockValidatorStore { - mock := &MockValidatorStore{ctrl: ctrl} - mock.recorder = &MockValidatorStoreMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockValidatorStore) EXPECT() *MockValidatorStoreMockRecorder { - return m.recorder -} - -// GetPubKeyByConsAddr mocks base method. 
-func (m *MockValidatorStore) GetPubKeyByConsAddr(arg0 context.Context, arg1 types.ConsAddress) (crypto.PublicKey, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetPubKeyByConsAddr", arg0, arg1) - ret0, _ := ret[0].(crypto.PublicKey) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetPubKeyByConsAddr indicates an expected call of GetPubKeyByConsAddr. -func (mr *MockValidatorStoreMockRecorder) GetPubKeyByConsAddr(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPubKeyByConsAddr", reflect.TypeOf((*MockValidatorStore)(nil).GetPubKeyByConsAddr), arg0, arg1) -} - -// MockGasTx is a mock of GasTx interface. -type MockGasTx struct { - ctrl *gomock.Controller - recorder *MockGasTxMockRecorder -} - -// MockGasTxMockRecorder is the mock recorder for MockGasTx. -type MockGasTxMockRecorder struct { - mock *MockGasTx -} - -// NewMockGasTx creates a new mock instance. -func NewMockGasTx(ctrl *gomock.Controller) *MockGasTx { - mock := &MockGasTx{ctrl: ctrl} - mock.recorder = &MockGasTxMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGasTx) EXPECT() *MockGasTxMockRecorder { - return m.recorder -} - -// GetGas mocks base method. -func (m *MockGasTx) GetGas() uint64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetGas") - ret0, _ := ret[0].(uint64) - return ret0 -} - -// GetGas indicates an expected call of GetGas. -func (mr *MockGasTxMockRecorder) GetGas() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGas", reflect.TypeOf((*MockGasTx)(nil).GetGas)) -} - -// MockProposalTxVerifier is a mock of ProposalTxVerifier interface. -type MockProposalTxVerifier struct { - ctrl *gomock.Controller - recorder *MockProposalTxVerifierMockRecorder -} - -// MockProposalTxVerifierMockRecorder is the mock recorder for MockProposalTxVerifier. -type MockProposalTxVerifierMockRecorder struct { - mock *MockProposalTxVerifier -} - -// NewMockProposalTxVerifier creates a new mock instance. -func NewMockProposalTxVerifier(ctrl *gomock.Controller) *MockProposalTxVerifier { - mock := &MockProposalTxVerifier{ctrl: ctrl} - mock.recorder = &MockProposalTxVerifierMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockProposalTxVerifier) EXPECT() *MockProposalTxVerifierMockRecorder { - return m.recorder -} - -// PrepareProposalVerifyTx mocks base method. -func (m *MockProposalTxVerifier) PrepareProposalVerifyTx(tx types.Tx) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrepareProposalVerifyTx", tx) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// PrepareProposalVerifyTx indicates an expected call of PrepareProposalVerifyTx. -func (mr *MockProposalTxVerifierMockRecorder) PrepareProposalVerifyTx(tx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrepareProposalVerifyTx", reflect.TypeOf((*MockProposalTxVerifier)(nil).PrepareProposalVerifyTx), tx) -} - -// ProcessProposalVerifyTx mocks base method. -func (m *MockProposalTxVerifier) ProcessProposalVerifyTx(txBz []byte) (types.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ProcessProposalVerifyTx", txBz) - ret0, _ := ret[0].(types.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ProcessProposalVerifyTx indicates an expected call of ProcessProposalVerifyTx. 
-func (mr *MockProposalTxVerifierMockRecorder) ProcessProposalVerifyTx(txBz interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ProcessProposalVerifyTx", reflect.TypeOf((*MockProposalTxVerifier)(nil).ProcessProposalVerifyTx), txBz) -} - -// TxDecode mocks base method. -func (m *MockProposalTxVerifier) TxDecode(txBz []byte) (types.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TxDecode", txBz) - ret0, _ := ret[0].(types.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// TxDecode indicates an expected call of TxDecode. -func (mr *MockProposalTxVerifierMockRecorder) TxDecode(txBz interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TxDecode", reflect.TypeOf((*MockProposalTxVerifier)(nil).TxDecode), txBz) -} - -// TxEncode mocks base method. -func (m *MockProposalTxVerifier) TxEncode(tx types.Tx) ([]byte, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TxEncode", tx) - ret0, _ := ret[0].([]byte) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// TxEncode indicates an expected call of TxEncode. -func (mr *MockProposalTxVerifierMockRecorder) TxEncode(tx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TxEncode", reflect.TypeOf((*MockProposalTxVerifier)(nil).TxEncode), tx) -} - -// MockTxSelector is a mock of TxSelector interface. -type MockTxSelector struct { - ctrl *gomock.Controller - recorder *MockTxSelectorMockRecorder -} - -// MockTxSelectorMockRecorder is the mock recorder for MockTxSelector. -type MockTxSelectorMockRecorder struct { - mock *MockTxSelector -} - -// NewMockTxSelector creates a new mock instance. -func NewMockTxSelector(ctrl *gomock.Controller) *MockTxSelector { - mock := &MockTxSelector{ctrl: ctrl} - mock.recorder = &MockTxSelectorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockTxSelector) EXPECT() *MockTxSelectorMockRecorder { - return m.recorder -} - -// Clear mocks base method. -func (m *MockTxSelector) Clear() { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Clear") -} - -// Clear indicates an expected call of Clear. -func (mr *MockTxSelectorMockRecorder) Clear() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clear", reflect.TypeOf((*MockTxSelector)(nil).Clear)) -} - -// SelectTxForProposal mocks base method. -func (m *MockTxSelector) SelectTxForProposal(ctx context.Context, maxTxBytes, maxBlockGas uint64, memTx types.Tx, txBz []byte) bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SelectTxForProposal", ctx, maxTxBytes, maxBlockGas, memTx, txBz) - ret0, _ := ret[0].(bool) - return ret0 -} - -// SelectTxForProposal indicates an expected call of SelectTxForProposal. -func (mr *MockTxSelectorMockRecorder) SelectTxForProposal(ctx, maxTxBytes, maxBlockGas, memTx, txBz interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectTxForProposal", reflect.TypeOf((*MockTxSelector)(nil).SelectTxForProposal), ctx, maxTxBytes, maxBlockGas, memTx, txBz) -} - -// SelectedTxs mocks base method. -func (m *MockTxSelector) SelectedTxs(ctx context.Context) [][]byte { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SelectedTxs", ctx) - ret0, _ := ret[0].([][]byte) - return ret0 -} - -// SelectedTxs indicates an expected call of SelectedTxs. 
-func (mr *MockTxSelectorMockRecorder) SelectedTxs(ctx interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SelectedTxs", reflect.TypeOf((*MockTxSelector)(nil).SelectedTxs), ctx) -} diff --git a/baseapp/utils_test.go b/baseapp/utils_test.go deleted file mode 100644 index 81a1dfe5fb..0000000000 --- a/baseapp/utils_test.go +++ /dev/null @@ -1,392 +0,0 @@ -package baseapp_test - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "net/url" - "reflect" - "strconv" - "testing" - "unsafe" - - cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" - cmttypes "github.com/cometbft/cometbft/types" - dbm "github.com/cosmos/cosmos-db" - "github.com/stretchr/testify/require" - - runtimev1alpha1 "cosmossdk.io/api/cosmos/app/runtime/v1alpha1" - appv1alpha1 "cosmossdk.io/api/cosmos/app/v1alpha1" - "cosmossdk.io/core/address" - "cosmossdk.io/core/appconfig" - "cosmossdk.io/depinject" - errorsmod "cosmossdk.io/errors" - "cosmossdk.io/math" - storetypes "cosmossdk.io/store/types" - - "github.com/cosmos/cosmos-sdk/baseapp" - baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/codec" - addresscodec "github.com/cosmos/cosmos-sdk/codec/address" - "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" - "github.com/cosmos/cosmos-sdk/runtime" - "github.com/cosmos/cosmos-sdk/testutil/mock" - simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" - "github.com/cosmos/cosmos-sdk/testutil/testdata" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/cosmos/cosmos-sdk/types/mempool" - signingtypes "github.com/cosmos/cosmos-sdk/types/tx/signing" - _ "github.com/cosmos/cosmos-sdk/x/auth" - "github.com/cosmos/cosmos-sdk/x/auth/signing" - _ "github.com/cosmos/cosmos-sdk/x/auth/tx/config" - authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" - _ "github.com/cosmos/cosmos-sdk/x/bank" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - _ "github.com/cosmos/cosmos-sdk/x/consensus" - _ "github.com/cosmos/cosmos-sdk/x/mint" - _ "github.com/cosmos/cosmos-sdk/x/params" - _ "github.com/cosmos/cosmos-sdk/x/staking" -) - -var ParamStoreKey = []byte("paramstore") - -// GenesisStateWithSingleValidator initializes GenesisState with a single validator and genesis accounts -// that also act as delegators. -func GenesisStateWithSingleValidator(t *testing.T, codec codec.Codec, builder *runtime.AppBuilder) map[string]json.RawMessage { - t.Helper() - - privVal := mock.NewPV() - pubKey, err := privVal.GetPubKey() - require.NoError(t, err) - - // create validator set with single validator - validator := cmttypes.NewValidator(pubKey, 1) - valSet := cmttypes.NewValidatorSet([]*cmttypes.Validator{validator}) - - // generate genesis account - senderPrivKey := secp256k1.GenPrivKey() - acc := authtypes.NewBaseAccount(senderPrivKey.PubKey().Address().Bytes(), senderPrivKey.PubKey(), 0, 0) - balances := []banktypes.Balance{ - { - Address: acc.GetAddress().String(), - Coins: sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, math.NewInt(100000000000000))), - }, - } - - genesisState := builder.DefaultGenesis() - // sus - genesisState, err = simtestutil.GenesisStateWithValSet(codec, genesisState, valSet, []authtypes.GenesisAccount{acc}, balances...) 
- require.NoError(t, err) - - return genesisState -} - -func makeMinimalConfig() depinject.Config { - var ( - mempoolOpt = baseapp.SetMempool(mempool.NewSenderNonceMempool()) - addressCodec = func() address.Codec { return addresscodec.NewBech32Codec("cosmos") } - validatorAddressCodec = func() runtime.ValidatorAddressCodec { return addresscodec.NewBech32Codec("cosmosvaloper") } - consensusAddressCodec = func() runtime.ConsensusAddressCodec { return addresscodec.NewBech32Codec("cosmosvalcons") } - ) - - return depinject.Configs( - depinject.Supply(mempoolOpt, addressCodec, validatorAddressCodec, consensusAddressCodec), - appconfig.Compose(&appv1alpha1.Config{ - Modules: []*appv1alpha1.ModuleConfig{ - { - Name: "runtime", - Config: appconfig.WrapAny(&runtimev1alpha1.Module{ - AppName: "BaseAppApp", - }), - }, - }, - })) -} - -type MsgKeyValueImpl struct{} - -func (m MsgKeyValueImpl) Set(ctx context.Context, msg *baseapptestutil.MsgKeyValue) (*baseapptestutil.MsgCreateKeyValueResponse, error) { - sdkCtx := sdk.UnwrapSDKContext(ctx) - sdkCtx.KVStore(capKey2).Set(msg.Key, msg.Value) - return &baseapptestutil.MsgCreateKeyValueResponse{}, nil -} - -type CounterServerImplGasMeterOnly struct { - gas uint64 -} - -func (m CounterServerImplGasMeterOnly) IncrementCounter(ctx context.Context, msg *baseapptestutil.MsgCounter) (*baseapptestutil.MsgCreateCounterResponse, error) { - sdkCtx := sdk.UnwrapSDKContext(ctx) - gas := m.gas - - // if no gas is provided, use the counter as gas. This is useful for testing - if gas == 0 { - gas = uint64(msg.Counter) - } - - sdkCtx.GasMeter().ConsumeGas(gas, "test") - return &baseapptestutil.MsgCreateCounterResponse{}, nil -} - -type NoopCounterServerImpl struct{} - -func (m NoopCounterServerImpl) IncrementCounter( - _ context.Context, - _ *baseapptestutil.MsgCounter, -) (*baseapptestutil.MsgCreateCounterResponse, error) { - return &baseapptestutil.MsgCreateCounterResponse{}, nil -} - -type CounterServerImpl struct { - t *testing.T - capKey storetypes.StoreKey - deliverKey []byte -} - -func (m CounterServerImpl) IncrementCounter(ctx context.Context, msg *baseapptestutil.MsgCounter) (*baseapptestutil.MsgCreateCounterResponse, error) { - return incrementCounter(ctx, m.t, m.capKey, m.deliverKey, msg) -} - -type Counter2ServerImpl struct { - t *testing.T - capKey storetypes.StoreKey - deliverKey []byte -} - -func (m Counter2ServerImpl) IncrementCounter(ctx context.Context, msg *baseapptestutil.MsgCounter2) (*baseapptestutil.MsgCreateCounterResponse, error) { - return incrementCounter(ctx, m.t, m.capKey, m.deliverKey, msg) -} - -func incrementCounter(ctx context.Context, - t *testing.T, - capKey storetypes.StoreKey, - deliverKey []byte, - msg sdk.Msg, -) (*baseapptestutil.MsgCreateCounterResponse, error) { - sdkCtx := sdk.UnwrapSDKContext(ctx) - store := sdkCtx.KVStore(capKey) - - sdkCtx.GasMeter().ConsumeGas(5, "test") - - var msgCount int64 - - switch m := msg.(type) { - case *baseapptestutil.MsgCounter: - if m.FailOnHandler { - return nil, errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "message handler failure") - } - msgCount = m.Counter - case *baseapptestutil.MsgCounter2: - if m.FailOnHandler { - return nil, errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "message handler failure") - } - msgCount = m.Counter - } - - sdkCtx.EventManager().EmitEvents( - counterEvent(sdk.EventTypeMessage, msgCount), - ) - - _, err := incrementingCounter(t, store, deliverKey, msgCount) - if err != nil { - return nil, err - } - - return &baseapptestutil.MsgCreateCounterResponse{}, nil -} - -func 
counterEvent(evType string, msgCount int64) sdk.Events { - return sdk.Events{ - sdk.NewEvent( - evType, - sdk.NewAttribute("update_counter", fmt.Sprintf("%d", msgCount)), - ), - } -} - -func anteHandlerTxTest(t *testing.T, capKey storetypes.StoreKey, storeKey []byte) sdk.AnteHandler { - return func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) { - store := ctx.KVStore(capKey) - counter, failOnAnte := parseTxMemo(t, tx) - - if failOnAnte { - return ctx, errorsmod.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") - } - - _, err := incrementingCounter(t, store, storeKey, counter) - if err != nil { - return ctx, err - } - - ctx.EventManager().EmitEvents( - counterEvent("ante_handler", counter), - ) - - ctx = ctx.WithPriority(testTxPriority) - return ctx, nil - } -} - -func incrementingCounter(t *testing.T, store storetypes.KVStore, counterKey []byte, counter int64) (*sdk.Result, error) { - storedCounter := getIntFromStore(t, store, counterKey) - require.Equal(t, storedCounter, counter) - setIntOnStore(store, counterKey, counter+1) - return &sdk.Result{}, nil -} - -func setIntOnStore(store storetypes.KVStore, key []byte, i int64) { - bz := make([]byte, 8) - n := binary.PutVarint(bz, i) - store.Set(key, bz[:n]) -} - -type paramStore struct { - db *dbm.MemDB -} - -var _ baseapp.ParamStore = (*paramStore)(nil) - -func (ps paramStore) Set(_ context.Context, value cmtproto.ConsensusParams) error { - bz, err := json.Marshal(value) - if err != nil { - return err - } - - return ps.db.Set(ParamStoreKey, bz) -} - -func (ps paramStore) Has(_ context.Context) (bool, error) { - return ps.db.Has(ParamStoreKey) -} - -func (ps paramStore) Get(_ context.Context) (cmtproto.ConsensusParams, error) { - bz, err := ps.db.Get(ParamStoreKey) - if err != nil { - return cmtproto.ConsensusParams{}, err - } - - if len(bz) == 0 { - return cmtproto.ConsensusParams{}, errors.New("params not found") - } - - var params cmtproto.ConsensusParams - if err := json.Unmarshal(bz, ¶ms); err != nil { - return cmtproto.ConsensusParams{}, err - } - - return params, nil -} - -func setTxSignature(t *testing.T, builder client.TxBuilder, nonce uint64) { - privKey := secp256k1.GenPrivKeyFromSecret([]byte("test")) - pubKey := privKey.PubKey() - err := builder.SetSignatures( - signingtypes.SignatureV2{ - PubKey: pubKey, - Sequence: nonce, - Data: &signingtypes.SingleSignatureData{}, - }, - ) - require.NoError(t, err) -} - -func testLoadVersionHelper(t *testing.T, app *baseapp.BaseApp, expectedHeight int64, expectedID storetypes.CommitID) { - lastHeight := app.LastBlockHeight() - lastID := app.LastCommitID() - require.Equal(t, expectedHeight, lastHeight) - require.Equal(t, expectedID, lastID) -} - -func getCheckStateCtx(app *baseapp.BaseApp) sdk.Context { - v := reflect.ValueOf(app).Elem() - f := v.FieldByName("checkState") - rf := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem() - return rf.MethodByName("Context").Call(nil)[0].Interface().(sdk.Context) -} - -func getFinalizeBlockStateCtx(app *baseapp.BaseApp) sdk.Context { - v := reflect.ValueOf(app).Elem() - f := v.FieldByName("finalizeBlockState") - rf := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem() - return rf.MethodByName("Context").Call(nil)[0].Interface().(sdk.Context) -} - -func parseTxMemo(t *testing.T, tx sdk.Tx) (counter int64, failOnAnte bool) { - txWithMemo, ok := tx.(sdk.TxWithMemo) - require.True(t, ok) - - memo := txWithMemo.GetMemo() - vals, err := url.ParseQuery(memo) - require.NoError(t, err) - - counter, err = 
strconv.ParseInt(vals.Get("counter"), 10, 64) - require.NoError(t, err) - - failOnAnte = vals.Get("failOnAnte") == "true" - return counter, failOnAnte -} - -func newTxCounter(t *testing.T, cfg client.TxConfig, counter int64, msgCounters ...int64) signing.Tx { - _, _, addr := testdata.KeyTestPubAddr() - msgs := make([]sdk.Msg, 0, len(msgCounters)) - for _, c := range msgCounters { - msg := &baseapptestutil.MsgCounter{Counter: c, FailOnHandler: false, Signer: addr.String()} - msgs = append(msgs, msg) - } - - builder := cfg.NewTxBuilder() - builder.SetMsgs(msgs...) - builder.SetMemo("counter=" + strconv.FormatInt(counter, 10) + "&failOnAnte=false") - setTxSignature(t, builder, uint64(counter)) - - return builder.GetTx() -} - -func getIntFromStore(t *testing.T, store storetypes.KVStore, key []byte) int64 { - bz := store.Get(key) - if len(bz) == 0 { - return 0 - } - - i, err := binary.ReadVarint(bytes.NewBuffer(bz)) - require.NoError(t, err) - - return i -} - -func setFailOnAnte(t *testing.T, cfg client.TxConfig, tx signing.Tx, failOnAnte bool) signing.Tx { - builder := cfg.NewTxBuilder() - builder.SetMsgs(tx.GetMsgs()...) - - memo := tx.GetMemo() - vals, err := url.ParseQuery(memo) - require.NoError(t, err) - - vals.Set("failOnAnte", strconv.FormatBool(failOnAnte)) - memo = vals.Encode() - builder.SetMemo(memo) - setTxSignature(t, builder, 1) - - return builder.GetTx() -} - -func setFailOnHandler(cfg client.TxConfig, tx signing.Tx, fail bool) signing.Tx { - builder := cfg.NewTxBuilder() - builder.SetMemo(tx.GetMemo()) - - msgs := tx.GetMsgs() - for i, msg := range msgs { - msgs[i] = &baseapptestutil.MsgCounter{ - Counter: msg.(*baseapptestutil.MsgCounter).Counter, - FailOnHandler: fail, - } - } - - builder.SetMsgs(msgs...) - return builder.GetTx() -} diff --git a/go.mod b/go.mod index fdd0e9458c..1bcf4d97f4 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,6 @@ require ( cosmossdk.io/log v1.2.1 cosmossdk.io/math v1.2.0 cosmossdk.io/store v1.0.1 - github.com/cockroachdb/errors v1.11.1 github.com/cometbft/cometbft v0.38.2 github.com/cosmos/cosmos-db v1.0.0 github.com/cosmos/cosmos-proto v1.0.0-beta.3 @@ -23,12 +22,8 @@ require ( github.com/golang/mock v1.6.0 github.com/golang/protobuf v1.5.3 github.com/google/go-cmp v0.6.0 - github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/hashicorp/go-metrics v0.5.1 - github.com/prometheus/client_golang v1.17.0 - github.com/prometheus/common v0.45.0 - github.com/spf13/cast v1.5.1 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 @@ -56,6 +51,7 @@ require ( github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chzyer/readline v1.5.1 // indirect + github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/pebble v0.0.0-20231101195458-481da04154d6 // indirect github.com/cockroachdb/redact v1.1.5 // indirect @@ -93,6 +89,7 @@ require ( github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect @@ -127,7 +124,9 @@ require ( github.com/petermattis/goid v0.0.0-20230904192822-1876fd5063bc // indirect 
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/prometheus/client_golang v1.17.0 // indirect
 	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.45.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
 	github.com/rogpeppe/go-internal v1.11.0 // indirect
@@ -135,6 +134,7 @@ require (
 	github.com/rs/zerolog v1.31.0 // indirect
 	github.com/sasha-s/go-deadlock v0.3.1 // indirect
 	github.com/spf13/afero v1.9.5 // indirect
+	github.com/spf13/cast v1.5.1 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/spf13/viper v1.16.0 // indirect
 	github.com/subosito/gotenv v1.4.2 // indirect