diff --git a/api/poktroll/proof/tx.pulsar.go b/api/poktroll/proof/tx.pulsar.go index 0f1d33850..0b815b1ae 100644 --- a/api/poktroll/proof/tx.pulsar.go +++ b/api/poktroll/proof/tx.pulsar.go @@ -4542,7 +4542,7 @@ type MsgSubmitProof struct { SupplierOperatorAddress string `protobuf:"bytes,1,opt,name=supplier_operator_address,json=supplierOperatorAddress,proto3" json:"supplier_operator_address,omitempty"` SessionHeader *session.SessionHeader `protobuf:"bytes,2,opt,name=session_header,json=sessionHeader,proto3" json:"session_header,omitempty"` - // serialized version of *smt.SparseMerkleClosestProof + // serialized version of *smt.SparseCompactMerkleClosestProof Proof []byte `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"` } diff --git a/api/poktroll/proof/types.pulsar.go b/api/poktroll/proof/types.pulsar.go index b1eddf0fd..cace1d1e4 100644 --- a/api/poktroll/proof/types.pulsar.go +++ b/api/poktroll/proof/types.pulsar.go @@ -1272,7 +1272,7 @@ type Proof struct { SupplierOperatorAddress string `protobuf:"bytes,1,opt,name=supplier_operator_address,json=supplierOperatorAddress,proto3" json:"supplier_operator_address,omitempty"` // The session header of the session that this claim is for. SessionHeader *session.SessionHeader `protobuf:"bytes,2,opt,name=session_header,json=sessionHeader,proto3" json:"session_header,omitempty"` - // The serialized SMST proof from the `#ClosestProof()` method. + // The serialized SMST compacted proof from the `#ClosestProof()` method. ClosestMerkleProof []byte `protobuf:"bytes,3,opt,name=closest_merkle_proof,json=closestMerkleProof,proto3" json:"closest_merkle_proof,omitempty"` } diff --git a/pkg/client/interface.go b/pkg/client/interface.go index e240eba9a..502773709 100644 --- a/pkg/client/interface.go +++ b/pkg/client/interface.go @@ -11,6 +11,7 @@ //go:generate mockgen -destination=../../testutil/mockclient/proof_query_client_mock.go -package=mockclient . ProofQueryClient //go:generate mockgen -destination=../../testutil/mockclient/tokenomics_query_client_mock.go -package=mockclient . TokenomicsQueryClient //go:generate mockgen -destination=../../testutil/mockclient/service_query_client_mock.go -package=mockclient . ServiceQueryClient +//go:generate mockgen -destination=../../testutil/mockclient/bank_query_client_mock.go -package=mockclient . BankQueryClient //go:generate mockgen -destination=../../testutil/mockclient/cosmos_tx_builder_mock.go -package=mockclient github.com/cosmos/cosmos-sdk/client TxBuilder //go:generate mockgen -destination=../../testutil/mockclient/cosmos_keyring_mock.go -package=mockclient github.com/cosmos/cosmos-sdk/crypto/keyring Keyring //go:generate mockgen -destination=../../testutil/mockclient/cosmos_client_mock.go -package=mockclient github.com/cosmos/cosmos-sdk/client AccountRetriever @@ -64,11 +65,9 @@ type SupplierClient interface { ctx context.Context, claimMsgs ...MsgCreateClaim, ) error - // SubmitProof sends proof messages which contain the smt.SparseMerkleClosestProof, + // SubmitProof sends proof messages which contain the smt.SparseCompactMerkleClosestProof, // corresponding to some previously created claim for the same session. // The proof is validated on-chain as part of the pocket protocol. - // TODO_MAINNET(#427): Use SparseCompactClosestProof here to reduce - // the amount of data stored on-chain. 
SubmitProofs( ctx context.Context, sessionProofs ...MsgSubmitProof, @@ -371,3 +370,10 @@ type ServiceQueryClient interface { // GetService queries the chain for the details of the service provided GetService(ctx context.Context, serviceId string) (sharedtypes.Service, error) } + +// BankQueryClient defines an interface that enables the querying of the +// on-chain bank information +type BankQueryClient interface { + // GetBalance queries the chain for the uPOKT balance of the account provided + GetBalance(ctx context.Context, address string) (*cosmostypes.Coin, error) +} diff --git a/pkg/client/query/bankquerier.go b/pkg/client/query/bankquerier.go new file mode 100644 index 000000000..198e5a3d5 --- /dev/null +++ b/pkg/client/query/bankquerier.go @@ -0,0 +1,57 @@ +package query + +import ( + "context" + + "cosmossdk.io/depinject" + sdk "github.com/cosmos/cosmos-sdk/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + grpc "github.com/cosmos/gogoproto/grpc" + + "github.com/pokt-network/poktroll/app/volatile" + "github.com/pokt-network/poktroll/pkg/client" +) + +var _ client.BankQueryClient = (*bankQuerier)(nil) + +// bankQuerier is a wrapper around the banktypes.QueryClient that enables the +// querying of on-chain balance information. +type bankQuerier struct { + clientConn grpc.ClientConn + bankQuerier banktypes.QueryClient +} + +// NewBankQuerier returns a new instance of a client.BankQueryClient by +// injecting the dependencies provided by the depinject.Config. +// +// Required dependencies: +// - grpc.ClientConn +func NewBankQuerier(deps depinject.Config) (client.BankQueryClient, error) { + bq := &bankQuerier{} + + if err := depinject.Inject( + deps, + &bq.clientConn, + ); err != nil { + return nil, err + } + + bq.bankQuerier = banktypes.NewQueryClient(bq.clientConn) + + return bq, nil +} + +// GetBalance returns the uPOKT balance of a given address +func (bq *bankQuerier) GetBalance( + ctx context.Context, + address string, +) (*sdk.Coin, error) { + // Query the blockchain for the balance record + req := &banktypes.QueryBalanceRequest{Address: address, Denom: volatile.DenomuPOKT} + res, err := bq.bankQuerier.Balance(ctx, req) + if err != nil { + return nil, ErrQueryBalanceNotFound.Wrapf("address: %s [%s]", address, err) + } + + return res.Balance, nil +} diff --git a/pkg/client/query/errors.go b/pkg/client/query/errors.go index 19b4893bf..f1d91249b 100644 --- a/pkg/client/query/errors.go +++ b/pkg/client/query/errors.go @@ -10,4 +10,5 @@ var ( ErrQueryPubKeyNotFound = sdkerrors.Register(codespace, 4, "account pub key not found") ErrQuerySessionParams = sdkerrors.Register(codespace, 5, "unable to query session params") ErrQueryRetrieveService = sdkerrors.Register(codespace, 6, "error while trying to retrieve a service") + ErrQueryBalanceNotFound = sdkerrors.Register(codespace, 7, "balance not found") ) diff --git a/pkg/deps/config/suppliers.go b/pkg/deps/config/suppliers.go index 494dfa453..aceb81410 100644 --- a/pkg/deps/config/suppliers.go +++ b/pkg/deps/config/suppliers.go @@ -502,6 +502,24 @@ func NewSupplyServiceQueryClientFn() SupplierFn { } } +// NewSupplyBankQuerierFn supplies a depinject config with a BankQuerier. +func NewSupplyBankQuerierFn() SupplierFn { + return func( + _ context.Context, + deps depinject.Config, + _ *cobra.Command, + ) (depinject.Config, error) { + // Create the bank querier.
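For context on how the new interface is meant to be consumed: a BankQueryClient is constructed with query.NewBankQuerier once a grpc.ClientConn has been supplied via depinject (as above), after which GetBalance returns the account's uPOKT coin. The sketch below is illustrative only; canAffordProofFees and its parameters are hypothetical names, not part of this change.

```go
package example

import (
	"context"

	cosmostypes "github.com/cosmos/cosmos-sdk/types"

	"github.com/pokt-network/poktroll/pkg/client"
)

// canAffordProofFees reports whether the operator's uPOKT balance covers the
// proof submission fee for numClaims claims.
func canAffordProofFees(
	ctx context.Context,
	bankClient client.BankQueryClient,
	operatorBech32 string,
	proofFee cosmostypes.Coin,
	numClaims int64,
) (bool, error) {
	// Query the bank module for the operator's current uPOKT balance.
	balance, err := bankClient.GetBalance(ctx, operatorBech32)
	if err != nil {
		return false, err
	}

	// Total fee owed if every claim later requires a proof submission.
	totalFee := cosmostypes.NewCoin(proofFee.Denom, proofFee.Amount.MulRaw(numClaims))
	return balance.IsGTE(totalFee), nil
}
```

This is the same balance-versus-fee comparison the relayer performs per session tree in payableProofsSessionTrees further down.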
+ bankQuerier, err := query.NewBankQuerier(deps) + if err != nil { + return nil, err + } + + // Supply the bank querier to the provided deps + return depinject.Configs(deps, depinject.Supply(bankQuerier)), nil + } +} + // newSupplyTxClientFn returns a new depinject.Config which is supplied with // the given deps and the new TxClient. func newSupplyTxClientsFn(ctx context.Context, deps depinject.Config, signingKeyName string) (depinject.Config, error) { diff --git a/pkg/relayer/cmd/cmd.go b/pkg/relayer/cmd/cmd.go index acd31896b..66681b881 100644 --- a/pkg/relayer/cmd/cmd.go +++ b/pkg/relayer/cmd/cmd.go @@ -197,6 +197,7 @@ func setupRelayerDependencies( config.NewSupplyTokenomicsQueryClientFn(), supplyMiner, config.NewSupplyAccountQuerierFn(), + config.NewSupplyBankQuerierFn(), config.NewSupplyApplicationQuerierFn(), config.NewSupplySupplierQuerierFn(), config.NewSupplySessionQuerierFn(), diff --git a/pkg/relayer/interface.go b/pkg/relayer/interface.go index 7f4c953cf..1f231e7f1 100644 --- a/pkg/relayer/interface.go +++ b/pkg/relayer/interface.go @@ -128,7 +128,7 @@ type SessionTree interface { // ProveClosest is a wrapper for the SMST's ProveClosest function. It returns the // proof for the given path. // This function should be called several blocks after a session has been claimed and needs to be proven. - ProveClosest(path []byte) (proof *smt.SparseMerkleClosestProof, err error) + ProveClosest(path []byte) (proof *smt.SparseCompactMerkleClosestProof, err error) // GetClaimRoot returns the root hash of the SMST needed for creating the claim. GetClaimRoot() []byte @@ -158,4 +158,7 @@ type SessionTree interface { // GetSupplierOperatorAddress returns the supplier operator address building this tree. GetSupplierOperatorAddress() *cosmostypes.AccAddress + + // GetTrieSpec returns the trie spec of the SMST. + GetTrieSpec() smt.TrieSpec } diff --git a/pkg/relayer/session/claim.go b/pkg/relayer/session/claim.go index 08421517a..ef758c28d 100644 --- a/pkg/relayer/session/claim.go +++ b/pkg/relayer/session/claim.go @@ -3,6 +3,7 @@ package session import ( "context" "fmt" + "slices" "github.com/pokt-network/poktroll/pkg/client" "github.com/pokt-network/poktroll/pkg/either" @@ -13,6 +14,7 @@ import ( "github.com/pokt-network/poktroll/pkg/relayer" prooftypes "github.com/pokt-network/poktroll/x/proof/types" "github.com/pokt-network/poktroll/x/shared" + "github.com/pokt-network/smt" ) // createClaims maps over the sessionsToClaimObs observable. For each claim batch, it: @@ -180,13 +182,16 @@ func (rs *relayerSessionsManager) newMapClaimSessionsFn( return either.Success(sessionTrees), false } - // TODO_FOLLOWUP(@red-0ne): Ensure that the supplier operator account - // has enough funds to cover for any potential proof submission in order to - // avoid slashing due to missing proofs. - // We should order the claimMsgs by reward amount and include claims up to - // whatever the supplier can afford to cover. - claimMsgs := make([]client.MsgCreateClaim, len(sessionTrees)) - for idx, sessionTree := range sessionTrees { + // Keep only the session trees whose proof submission fees the supplier operator can afford to pay.
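The SupplierFn returned by NewSupplyBankQuerierFn follows the same composition pattern as the other suppliers in this file: each function receives the accumulated depinject.Config and returns it with one more dependency supplied. A minimal sketch of that chaining, assuming the SupplierFn signature shown above (buildDeps is a hypothetical helper, not part of the diff):

```go
package example

import (
	"context"

	"cosmossdk.io/depinject"
	"github.com/spf13/cobra"

	"github.com/pokt-network/poktroll/pkg/deps/config"
)

// buildDeps threads a depinject.Config through a list of SupplierFn values.
func buildDeps(
	ctx context.Context,
	cmd *cobra.Command,
	supplierFns ...config.SupplierFn,
) (depinject.Config, error) {
	deps := depinject.Configs()
	for _, supplyFn := range supplierFns {
		var err error
		if deps, err = supplyFn(ctx, deps, cmd); err != nil {
			return nil, err
		}
	}
	return deps, nil
}
```

Under this shape, wiring the bank querier into a binary is a single additional element in the supplier list, which is what the setupRelayerDependencies change above does.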
+ claimableSessionTrees, err := rs.payableProofsSessionTrees(ctx, sessionTrees) + if err != nil { + failedCreateClaimsSessionsPublishCh <- sessionTrees + rs.logger.Error().Err(err).Msg("failed to calculate payable proofs session trees") + return either.Error[[]relayer.SessionTree](err), false + } + + claimMsgs := make([]client.MsgCreateClaim, len(claimableSessionTrees)) + for idx, sessionTree := range claimableSessionTrees { claimMsgs[idx] = &prooftypes.MsgCreateClaim{ RootHash: sessionTree.GetClaimRoot(), SessionHeader: sessionTree.GetSessionHeader(), @@ -196,12 +201,12 @@ func (rs *relayerSessionsManager) newMapClaimSessionsFn( // Create claims for each supplier operator address in `sessionTrees`. if err := supplierClient.CreateClaims(ctx, claimMsgs...); err != nil { - failedCreateClaimsSessionsPublishCh <- sessionTrees + failedCreateClaimsSessionsPublishCh <- claimableSessionTrees rs.logger.Error().Err(err).Msg("failed to create claims") return either.Error[[]relayer.SessionTree](err), false } - return either.Success(sessionTrees), false + return either.Success(claimableSessionTrees), false } } @@ -235,3 +240,85 @@ func (rs *relayerSessionsManager) goCreateClaimRoots( failSubmitProofsSessionsCh <- failedClaims claimsFlushedCh <- flushedClaims } + +// payableProofsSessionTrees returns the session trees that the supplier operator +// can afford to claim (i.e. pay the fee for submitting a proof). +// The session trees are sorted from the most rewarding to the least rewarding to +// ensure optimal rewards in the case of insufficient funds. +// Note that all sessionTrees are associated with the same supplier operator address. +func (rs *relayerSessionsManager) payableProofsSessionTrees( + ctx context.Context, + sessionTrees []relayer.SessionTree, +) ([]relayer.SessionTree, error) { + supplierOperatorAddress := sessionTrees[0].GetSupplierOperatorAddress().String() + logger := rs.logger.With( + "supplier_operator_address", supplierOperatorAddress, + ) + + proofParams, err := rs.proofQueryClient.GetParams(ctx) + if err != nil { + return nil, err + } + proofSubmissionFeeCoin := proofParams.GetProofSubmissionFee() + + supplierOperatorBalanceCoin, err := rs.bankQueryClient.GetBalance( + ctx, + sessionTrees[0].GetSupplierOperatorAddress().String(), + ) + if err != nil { + return nil, err + } + + // Sort the session trees by the sum of the claim root to ensure that the + // most rewarding claims are claimed first. + slices.SortFunc(sessionTrees, func(a, b relayer.SessionTree) int { + rootA := a.GetClaimRoot() + sumA, errA := smt.MerkleSumRoot(rootA).Sum() + if errA != nil { + logger.With( + "session_id", a.GetSessionHeader().GetSessionId(), + "claim_root", fmt.Sprintf("%x", rootA), + ).Error().Err(errA).Msg("failed to calculate sum of claim root, assuming 0") + sumA = 0 + } + + rootB := b.GetClaimRoot() + sumB, errB := smt.MerkleSumRoot(rootB).Sum() + if errB != nil { + logger.With( + "session_id", b.GetSessionHeader().GetSessionId(), + "claim_root", fmt.Sprintf("%x", rootB), + ).Error().Err(errB).Msg("failed to calculate sum of claim root, assuming 0") + sumB = 0 + } + + // Sort in descending order. + return int(sumB - sumA) + }) + + claimableSessionTrees := []relayer.SessionTree{} + for _, sessionTree := range sessionTrees { + // If the supplier operator can afford to claim the session, add it to the + // claimableSessionTrees slice.
+ if supplierOperatorBalanceCoin.IsGTE(*proofSubmissionFeeCoin) { + claimableSessionTrees = append(claimableSessionTrees, sessionTree) + newSupplierOperatorBalanceCoin := supplierOperatorBalanceCoin.Sub(*proofSubmissionFeeCoin) + supplierOperatorBalanceCoin = &newSupplierOperatorBalanceCoin + continue + } + + // Log a warning for any session that the supplier operator cannot afford to claim. + logger.With( + "session_id", sessionTree.GetSessionHeader().GetSessionId(), + "supplier_operator_balance", supplierOperatorBalanceCoin, + "proof_submission_fee", proofSubmissionFeeCoin, + ).Warn().Msg("supplier operator cannot afford to submit proof for claim, skipping") + } + + logger.Warn().Msgf( + "Supplier operator %q can only afford %d out of %d claims", + supplierOperatorAddress, len(claimableSessionTrees), len(sessionTrees), + ) + + return claimableSessionTrees, nil +} diff --git a/pkg/relayer/session/session.go b/pkg/relayer/session/session.go index 625d09433..15fce0220 100644 --- a/pkg/relayer/session/session.go +++ b/pkg/relayer/session/session.go @@ -67,6 +67,9 @@ type relayerSessionsManager struct { // tokenomicsQueryClient is used to query for the tokenomics module parameters. tokenomicsQueryClient client.TokenomicsQueryClient + + // bankQueryClient is used to query for on-chain account balances. + bankQueryClient client.BankQueryClient } // NewRelayerSessions creates a new relayerSessions. @@ -98,6 +101,7 @@ func NewRelayerSessions( &rs.serviceQueryClient, &rs.proofQueryClient, &rs.tokenomicsQueryClient, + &rs.bankQueryClient, ); err != nil { return nil, err } diff --git a/pkg/relayer/session/session_test.go b/pkg/relayer/session/session_test.go index 5b1442e5b..5b3c24f2f 100644 --- a/pkg/relayer/session/session_test.go +++ b/pkg/relayer/session/session_test.go @@ -2,6 +2,7 @@ package session_test import ( "context" + "fmt" "math" "testing" "time" @@ -17,6 +18,7 @@ import ( "github.com/pokt-network/poktroll/app/volatile" "github.com/pokt-network/poktroll/pkg/client" + "github.com/pokt-network/poktroll/pkg/client/supplier" "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/pkg/observable/channel" "github.com/pokt-network/poktroll/pkg/polylog/polyzero" @@ -68,13 +70,218 @@ func requireProofCountEqualsExpectedValueFromProofParams(t *testing.T, proofPara ServiceId: service.Id, }, } - sessionHeader := activeSession.GetHeader() + supplierOperatorAddress := sample.AccAddress() + // Set the supplier operator balance to be able to submit the expected number of proofs. + feePerProof := prooftypes.DefaultParams().ProofSubmissionFee.Amount.Int64() + numExpectedProofs := int64(2) + supplierOperatorBalance := feePerProof * numExpectedProofs + supplierClientMap := testsupplier.NewClaimProofSupplierClientMap(ctx, t, supplierOperatorAddress, proofCount) + blockPublishCh, minedRelaysPublishCh := setupDependencies(t, ctx, supplierClientMap, emptyBlockHash, proofParams, supplierOperatorBalance) + + // Publish a mined relay to the minedRelaysPublishCh to insert into the session tree. + minedRelay := testrelayer.NewUnsignedMinedRelay(t, activeSession, supplierOperatorAddress) + minedRelaysPublishCh <- minedRelay + + // The relayerSessionsManager should have created a session tree for the relay. + waitSimulateIO() + + // Publish a block to the blockPublishCh to simulate non-actionable blocks.
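Isolating the ordering rule used by payableProofsSessionTrees above: claims are ranked by the sum embedded in their SMST root so that the most valuable claims are kept when the operator's balance runs out. A standalone sketch over raw claim roots (sortClaimRootsBySum is a hypothetical helper; unreadable roots are treated as zero-sum, as in the relayer code):

```go
package example

import (
	"slices"

	"github.com/pokt-network/smt"
)

// sortClaimRootsBySum orders SMST claim roots from the highest claimed sum to the lowest.
func sortClaimRootsBySum(claimRoots [][]byte) {
	sumOf := func(root []byte) uint64 {
		sum, err := smt.MerkleSumRoot(root).Sum()
		if err != nil {
			return 0 // assume 0 for roots whose sum cannot be read
		}
		return sum
	}

	slices.SortFunc(claimRoots, func(a, b []byte) int {
		sumA, sumB := sumOf(a), sumOf(b)
		// Descending order; explicit comparisons avoid unsigned wrap-around.
		switch {
		case sumB > sumA:
			return 1
		case sumA > sumB:
			return -1
		default:
			return 0
		}
	})
}
```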
+ sessionStartHeight := activeSession.GetHeader().GetSessionStartBlockHeight() + sessionEndHeight := activeSession.GetHeader().GetSessionEndBlockHeight() + + playClaimAndProofSubmissionBlocks(t, sessionStartHeight, sessionEndHeight, supplierOperatorAddress, emptyBlockHash, blockPublishCh) +} + +func TestRelayerSessionsManager_ProofThresholdRequired(t *testing.T) { + proofParams := prooftypes.DefaultParams() + + // Set proof requirement threshold to a low enough value so a proof is always requested. + proofParams.ProofRequirementThreshold = uPOKTCoin(1) + + // The test is submitting a single claim. Having the proof requirement threshold + // set to 1 results in exactly 1 proof being requested. + numExpectedProofs := 1 + + requireProofCountEqualsExpectedValueFromProofParams(t, proofParams, numExpectedProofs) +} + +func TestRelayerSessionsManager_ProofProbabilityRequired(t *testing.T) { + proofParams := prooftypes.DefaultParams() + + // Set proof requirement threshold to max int64 to skip the threshold check. + proofParams.ProofRequirementThreshold = uPOKTCoin(math.MaxInt64) + // Set proof request probability to 1 so a proof is always requested. + proofParams.ProofRequestProbability = 1 + + // The test is submitting a single claim. Having the proof request probability + // set to 1 results in exactly 1 proof being requested. + numExpectedProofs := 1 + + requireProofCountEqualsExpectedValueFromProofParams(t, proofParams, numExpectedProofs) +} + +func TestRelayerSessionsManager_ProofNotRequired(t *testing.T) { + proofParams := prooftypes.DefaultParams() + + // Set proof requirement threshold to max int64 to skip the threshold check. + proofParams.ProofRequirementThreshold = uPOKTCoin(math.MaxInt64) + // Set proof request probability to 0 so a proof is never requested. + proofParams.ProofRequestProbability = 0 + + // The test is submitting a single claim. Having the proof request probability + // set to 0 and proof requirement threshold set to max uint64 results in no proofs + // being requested. + numExpectedProofs := 0 + + requireProofCountEqualsExpectedValueFromProofParams(t, proofParams, numExpectedProofs) +} + +func TestRelayerSessionsManager_InsufficientBalanceForProofSubmission(t *testing.T) { + var ( + _, ctx = testpolylog.NewLoggerWithCtx(context.Background(), polyzero.DebugLevel) + spec = smt.NewTrieSpec(protocol.NewTrieHasher(), true) + emptyBlockHash = make([]byte, spec.PathHasherSize()) + ) + + proofParams := prooftypes.DefaultParams() + + // Set proof requirement threshold to a low enough value so a proof is always requested. + proofParams.ProofRequirementThreshold = uPOKTCoin(1) + + // * Add 2 services with different CUPRs + // * Create 2 claims with the same number of mined relays, each claim for a different service. + // * Assert that only the claim of the highest CUPR service get its proof submitted. 
+ + lowCUPRService := sharedtypes.Service{ + Id: "lowCUPRService", + ComputeUnitsPerRelay: 1, + } + testqueryclients.AddToExistingServices(t, lowCUPRService) + + highCUPRService := sharedtypes.Service{ + Id: "highCUPRService", + ComputeUnitsPerRelay: 2, + } + testqueryclients.AddToExistingServices(t, highCUPRService) + + lowCUPRServiceActiveSession := &sessiontypes.Session{ + Header: &sessiontypes.SessionHeader{ + SessionStartBlockHeight: 1, + SessionEndBlockHeight: 2, + ServiceId: lowCUPRService.Id, + SessionId: fmt.Sprintf("%sSessionId", lowCUPRService.Id), + }, + } + + highCUPRServiceActiveSession := &sessiontypes.Session{ + Header: &sessiontypes.SessionHeader{ + SessionStartBlockHeight: 1, + SessionEndBlockHeight: 2, + ServiceId: highCUPRService.Id, + SessionId: fmt.Sprintf("%sSessionId", highCUPRService.Id), + }, + } + + // Assert that the session start and end block heights are the same for both + // services and use their common start and end block heights for the test. + + require.Equal(t, + lowCUPRServiceActiveSession.GetHeader().GetSessionStartBlockHeight(), + highCUPRServiceActiveSession.GetHeader().GetSessionStartBlockHeight(), + ) + sessionStartHeight := lowCUPRServiceActiveSession.GetHeader().GetSessionStartBlockHeight() + + require.Equal(t, + lowCUPRServiceActiveSession.GetHeader().GetSessionEndBlockHeight(), + highCUPRServiceActiveSession.GetHeader().GetSessionEndBlockHeight(), + ) + sessionEndHeight := lowCUPRServiceActiveSession.GetHeader().GetSessionEndBlockHeight() + // Create a supplier client map that expects exactly 1 claim and 1 proof submission + // even though 2 claims are created. + ctrl := gomock.NewController(t) + supplierClientMock := mockclient.NewMockSupplierClient(ctrl) + + supplierOperatorAddress := sample.AccAddress() + supplierOperatorAccAddress := sdktypes.MustAccAddressFromBech32(supplierOperatorAddress) + // Set the supplier operator balance to be able to submit only a single proof. + supplierOperatorBalance := prooftypes.DefaultParams().ProofSubmissionFee.Amount.Int64() + 1 + supplierClientMock.EXPECT(). + OperatorAddress(). + Return(&supplierOperatorAccAddress). + AnyTimes() + + supplierClientMock.EXPECT(). + CreateClaims( + gomock.Eq(ctx), + gomock.AssignableToTypeOf(([]client.MsgCreateClaim)(nil)), + ). + DoAndReturn(func(ctx context.Context, claimMsgs ...*prooftypes.MsgCreateClaim) error { + // Assert that only the claim of the highest CUPR service is created. + require.Len(t, claimMsgs, 1) + require.Equal(t, claimMsgs[0].SessionHeader.ServiceId, highCUPRService.Id) + return nil + }). + Times(1) + + supplierClientMock.EXPECT(). + SubmitProofs( + gomock.Eq(ctx), + gomock.AssignableToTypeOf(([]client.MsgSubmitProof)(nil)), + ). + DoAndReturn(func(ctx context.Context, proofMsgs ...*prooftypes.MsgSubmitProof) error { + // Assert that only the proof of the highest CUPR service is created. + require.Len(t, proofMsgs, 1) + require.Equal(t, proofMsgs[0].SessionHeader.ServiceId, highCUPRService.Id) + return nil + }). + Times(1) + + supplierClientMap := supplier.NewSupplierClientMap() + supplierClientMap.SupplierClients[supplierOperatorAddress] = supplierClientMock + + blockPublishCh, minedRelaysPublishCh := setupDependencies(t, ctx, supplierClientMap, emptyBlockHash, proofParams, supplierOperatorBalance) + + // For each service, publish a mined relay to the minedRelaysPublishCh to + // insert into the session tree. 
+ lowCUPRMinedRelay := testrelayer.NewUnsignedMinedRelay(t, lowCUPRServiceActiveSession, supplierOperatorAddress) + minedRelaysPublishCh <- lowCUPRMinedRelay + + // The relayerSessionsManager should have created a session tree for the low CUPR relay. + waitSimulateIO() + + highCUPRMinedRelay := testrelayer.NewUnsignedMinedRelay(t, highCUPRServiceActiveSession, supplierOperatorAddress) + minedRelaysPublishCh <- highCUPRMinedRelay + + // The relayerSessionsManager should have created a session tree for the high CUPR relay. + waitSimulateIO() + + playClaimAndProofSubmissionBlocks(t, sessionStartHeight, sessionEndHeight, supplierOperatorAddress, emptyBlockHash, blockPublishCh) +} + +// waitSimulateIO sleeps for a bit to allow the relayer sessions manager to +// process asynchronously. This effectively simulates I/O delays which would +// normally be present. +func waitSimulateIO() { + time.Sleep(50 * time.Millisecond) +} + +// uPOKTCoin returns a pointer to a uPOKT denomination coin with the given amount. +func uPOKTCoin(amount int64) *sdktypes.Coin { + return &sdktypes.Coin{Denom: volatile.DenomuPOKT, Amount: sdkmath.NewInt(amount)} +} + +func setupDependencies( + t *testing.T, + ctx context.Context, + supplierClientMap *supplier.SupplierClientMap, + blockHash []byte, + proofParams prooftypes.Params, + supplierOperatorBalance int64, +) (chan<- client.Block, chan<- *relayer.MinedRelay) { // Set up dependencies. blocksObs, blockPublishCh := channel.NewReplayObservable[client.Block](ctx, 20) - blockClient := testblock.NewAnyTimesCommittedBlocksSequenceBlockClient(t, emptyBlockHash, blocksObs) - supplierOperatorAddress := sample.AccAddress() - supplierClientMap := testsupplier.NewClaimProofSupplierClientMap(ctx, t, supplierOperatorAddress, proofCount) + blockClient := testblock.NewAnyTimesCommittedBlocksSequenceBlockClient(t, blockHash, blocksObs) ctrl := gomock.NewController(t) blockQueryClientMock := mockclient.NewMockCometRPC(ctrl) @@ -104,10 +311,10 @@ func requireProofCountEqualsExpectedValueFromProofParams(t *testing.T, proofPara AnyTimes() sharedQueryClientMock := testqueryclients.NewTestSharedQueryClient(t) - serviceQueryClientMock := testqueryclients.NewTestServiceQueryClient(t) proofQueryClientMock := testqueryclients.NewTestProofQueryClientWithParams(t, &proofParams) tokenomicsQueryClient := testqueryclients.NewTestTokenomicsQueryClient(t) + bankQueryClient := testqueryclients.NewTestBankQueryClientWithBalance(t, supplierOperatorBalance) deps := depinject.Supply( blockClient, @@ -117,6 +324,7 @@ func requireProofCountEqualsExpectedValueFromProofParams(t *testing.T, proofPara serviceQueryClientMock, proofQueryClientMock, tokenomicsQueryClient, + bankQueryClient, ) storesDirectoryOpt := testrelayer.WithTempStoresDirectory(t) @@ -136,47 +344,51 @@ func requireProofCountEqualsExpectedValueFromProofParams(t *testing.T, proofPara // Wait a tick to allow the relayer sessions manager to start. waitSimulateIO() - // Publish a mined relay to the minedRelaysPublishCh to insert into the session tree. - minedRelay := testrelayer.NewUnsignedMinedRelay(t, activeSession, supplierOperatorAddress) - minedRelaysPublishCh <- minedRelay - - // The relayerSessionsManager should have created a session tree for the relay. - waitSimulateIO() + return blockPublishCh, minedRelaysPublishCh +} +// playClaimAndProofSubmissionBlocks simulates the block heights at which claims and proofs +// are submitted by the supplier. 
It publishes blocks to the blockPublishCh to trigger +// claims and proofs creation for the session number. +func playClaimAndProofSubmissionBlocks( + t *testing.T, + sessionStartHeight, sessionEndHeight int64, + supplierOperatorAddress string, + blockHash []byte, + blockPublishCh chan<- client.Block, +) { // Publish a block to the blockPublishCh to simulate non-actionable blocks. - sessionStartHeight := sessionHeader.GetSessionStartBlockHeight() - noopBlock := testblock.NewAnyTimesBlock(t, emptyBlockHash, sessionStartHeight) + // NB: This only needs to be done once per block regardless of the number of + // services, claims and proofs. + noopBlock := testblock.NewAnyTimesBlock(t, blockHash, sessionStartHeight) blockPublishCh <- noopBlock waitSimulateIO() // Calculate the session grace period end block height to emit that block height - // to the blockPublishCh to trigger claim creation for the session. + // to the blockPublishCh to trigger session trees processing for the session number. sharedParams := sharedtypes.DefaultParams() - sessionEndHeight := sessionHeader.GetSessionEndBlockHeight() claimWindowOpenHeight := shared.GetClaimWindowOpenHeight(&sharedParams, sessionEndHeight) earliestSupplierClaimCommitHeight := shared.GetEarliestSupplierClaimCommitHeight( &sharedParams, sessionEndHeight, - emptyBlockHash, + blockHash, supplierOperatorAddress, ) - claimOpenHeightBlock := testblock.NewAnyTimesBlock(t, emptyBlockHash, claimWindowOpenHeight) + claimOpenHeightBlock := testblock.NewAnyTimesBlock(t, blockHash, claimWindowOpenHeight) blockPublishCh <- claimOpenHeightBlock waitSimulateIO() - // Publish a block to the blockPublishCh to trigger claim creation for the session. - triggerClaimBlock := testblock.NewAnyTimesBlock(t, emptyBlockHash, earliestSupplierClaimCommitHeight) + // Publish a block to the blockPublishCh to trigger claims creation for the session number. + triggerClaimBlock := testblock.NewAnyTimesBlock(t, blockHash, earliestSupplierClaimCommitHeight) blockPublishCh <- triggerClaimBlock waitSimulateIO() - // TODO_IMPROVE: ensure correctness of persisted session trees here. - proofWindowOpenHeight := shared.GetProofWindowOpenHeight(&sharedParams, sessionEndHeight) - proofPathSeedBlock := testblock.NewAnyTimesBlock(t, emptyBlockHash, proofWindowOpenHeight) + proofPathSeedBlock := testblock.NewAnyTimesBlock(t, blockHash, proofWindowOpenHeight) blockPublishCh <- proofPathSeedBlock waitSimulateIO() @@ -185,62 +397,11 @@ func requireProofCountEqualsExpectedValueFromProofParams(t *testing.T, proofPara earliestSupplierProofCommitHeight := shared.GetEarliestSupplierProofCommitHeight( &sharedParams, sessionEndHeight, - emptyBlockHash, + blockHash, supplierOperatorAddress, ) - triggerProofBlock := testblock.NewAnyTimesBlock(t, emptyBlockHash, earliestSupplierProofCommitHeight) + triggerProofBlock := testblock.NewAnyTimesBlock(t, blockHash, earliestSupplierProofCommitHeight) blockPublishCh <- triggerProofBlock waitSimulateIO() } - -func TestRelayerSessionsManager_ProofThresholdRequired(t *testing.T) { - proofParams := prooftypes.DefaultParams() - - // Set proof requirement threshold to a low enough value so a proof is always requested. - proofParams.ProofRequirementThreshold = &sdktypes.Coin{Denom: volatile.DenomuPOKT, Amount: sdkmath.NewInt(1)} - - // The test is submitting a single claim. Having the proof requirement threshold - // set to 1 results in exactly 1 proof being requested. 
- numExpectedProofs := 1 - - requireProofCountEqualsExpectedValueFromProofParams(t, proofParams, numExpectedProofs) -} - -func TestRelayerSessionsManager_ProofProbabilityRequired(t *testing.T) { - proofParams := prooftypes.DefaultParams() - - // Set proof requirement threshold to max int64 to skip the threshold check. - proofParams.ProofRequirementThreshold = &sdktypes.Coin{Denom: volatile.DenomuPOKT, Amount: sdkmath.NewInt(math.MaxInt64)} - // Set proof request probability to 1 so a proof is always requested. - proofParams.ProofRequestProbability = 1 - - // The test is submitting a single claim. Having the proof request probability - // set to 1 results in exactly 1 proof being requested. - numExpectedProofs := 1 - - requireProofCountEqualsExpectedValueFromProofParams(t, proofParams, numExpectedProofs) -} - -func TestRelayerSessionsManager_ProofNotRequired(t *testing.T) { - proofParams := prooftypes.DefaultParams() - - // Set proof requirement threshold to max int64 to skip the threshold check. - proofParams.ProofRequirementThreshold = &sdktypes.Coin{Denom: volatile.DenomuPOKT, Amount: sdkmath.NewInt(math.MaxInt64)} - // Set proof request probability to 0 so a proof is never requested. - proofParams.ProofRequestProbability = 0 - - // The test is submitting a single claim. Having the proof request probability - // set to 0 and proof requirement threshold set to max uint64 results in no proofs - // being requested. - numExpectedProofs := 0 - - requireProofCountEqualsExpectedValueFromProofParams(t, proofParams, numExpectedProofs) -} - -// waitSimulateIO sleeps for a bit to allow the relayer sessions manager to -// process asynchronously. This effectively simulates I/O delays which would -// normally be present. -func waitSimulateIO() { - time.Sleep(50 * time.Millisecond) -} diff --git a/pkg/relayer/session/sessiontree.go b/pkg/relayer/session/sessiontree.go index 18e3ec607..b55340224 100644 --- a/pkg/relayer/session/sessiontree.go +++ b/pkg/relayer/session/sessiontree.go @@ -50,11 +50,11 @@ type sessionTree struct { // proofPath is the path for which the proof was generated. proofPath []byte - // proof is the generated proof for the session given a proofPath. - proof *smt.SparseMerkleClosestProof + // compactProof is the generated compactProof for the session given a proofPath. + compactProof *smt.SparseCompactMerkleClosestProof - // proofBz is the marshaled proof for the session. - proofBz []byte + // compactProofBz is the marshaled proof for the session. + compactProofBz []byte // treeStore is the KVStore used to store the SMST. treeStore pebble.PebbleKVStore @@ -154,9 +154,7 @@ func (st *sessionTree) Update(key, value []byte, weight uint64) error { // This function is intended to be called after a session has been claimed and needs to be proven. // If the proof has already been generated, it returns the cached proof. // It returns an error if the SMST has not been flushed yet (the claim has not been generated) -// TODO_IMPROVE(#427): Compress the proof into a SparseCompactClosestMerkleProof -// prior to submitting to chain to reduce on-chain storage requirements for proofs. 
-func (st *sessionTree) ProveClosest(path []byte) (proof *smt.SparseMerkleClosestProof, err error) { +func (st *sessionTree) ProveClosest(path []byte) (compactProof *smt.SparseCompactMerkleClosestProof, err error) { st.sessionMu.Lock() defer st.sessionMu.Unlock() @@ -166,13 +164,13 @@ func (st *sessionTree) ProveClosest(path []byte) (proof *smt.SparseMerkleClosest } // If the proof has already been generated, return the cached proof. - if st.proof != nil { + if st.compactProof != nil { // Make sure the path is the same as the one for which the proof was generated. if !bytes.Equal(path, st.proofPath) { return nil, ErrSessionTreeProofPathMismatch } - return st.proof, nil + return st.compactProof, nil } // Restore the KVStore from disk since it has been closed after the claim has been generated. @@ -184,12 +182,19 @@ func (st *sessionTree) ProveClosest(path []byte) (proof *smt.SparseMerkleClosest sessionSMT := smt.ImportSparseMerkleSumTrie(st.treeStore, sha256.New(), st.claimedRoot, smt.WithValueHasher(nil)) // Generate the proof and cache it along with the path for which it was generated. - proof, err = sessionSMT.ProveClosest(path) + // There is no ProveClosest variant that generates a compact proof directly. + // Generate a regular SparseMerkleClosestProof then compact it. + proof, err := sessionSMT.ProveClosest(path) if err != nil { return nil, err } - proofBz, err := proof.Marshal() + compactProof, err = smt.CompactClosestProof(proof, &sessionSMT.TrieSpec) + if err != nil { + return nil, err + } + + compactProofBz, err := compactProof.Marshal() if err != nil { return nil, err } @@ -197,20 +202,25 @@ func (st *sessionTree) ProveClosest(path []byte) (proof *smt.SparseMerkleClosest // If no error occurred, cache the proof and the path for which it was generated. st.sessionSMT = sessionSMT st.proofPath = path - st.proof = proof - st.proofBz = proofBz + st.compactProof = compactProof + st.compactProofBz = compactProofBz - return st.proof, nil + return st.compactProof, nil } // GetProofBz returns the marshaled proof for the session. func (st *sessionTree) GetProofBz() []byte { - return st.proofBz + return st.compactProofBz +} + +// GetTrieSpec returns the trie spec of the SMST. +func (st *sessionTree) GetTrieSpec() smt.TrieSpec { + return *st.sessionSMT.Spec() } // GetProof returns the proof for the SMST if it has been generated or nil otherwise. -func (st *sessionTree) GetProof() *smt.SparseMerkleClosestProof { - return st.proof +func (st *sessionTree) GetProof() *smt.SparseCompactMerkleClosestProof { + return st.compactProof } // Flush gets the root hash of the SMST needed for submitting the claim; diff --git a/pkg/relayer/session/sessiontree_test.go b/pkg/relayer/session/sessiontree_test.go index 4e199dcfe..762ebcb1b 100644 --- a/pkg/relayer/session/sessiontree_test.go +++ b/pkg/relayer/session/sessiontree_test.go @@ -1,3 +1,105 @@ package session_test -// TODO: Add tests to the sessionTree logic +import ( + "bytes" + "compress/gzip" + "crypto/rand" + "testing" + + "github.com/pokt-network/poktroll/pkg/crypto/protocol" + "github.com/pokt-network/smt" + "github.com/pokt-network/smt/kvstore/pebble" + "github.com/stretchr/testify/require" +) + +const ( + // Test multiple SMST sizes to see how the compaction ratio changes when the number + // of leaves increases. + // maxLeafs is the maximum number of leaves to test, after which the test stops. + maxLeafs = 10000 + // Since the inserted leaves are random, we run the test for a given leaf count + // multiple times to remove the randomness bias. 
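The sessiontree.go change above generates a regular closest proof and only then compacts it, because the SMST exposes no compact-generating ProveClosest variant. A minimal end-to-end sketch of that relayer-side flow, assuming an in-memory Pebble store, 32-byte paths from the protocol trie hasher, and a hypothetical function name:

```go
package example

import (
	"crypto/rand"

	"github.com/pokt-network/smt"
	"github.com/pokt-network/smt/kvstore/pebble"

	"github.com/pokt-network/poktroll/pkg/crypto/protocol"
)

// buildCompactClosestProofBz returns the marshaled compact closest proof for path,
// i.e. the bytes that now back MsgSubmitProof.Proof.
func buildCompactClosestProofBz(path []byte) ([]byte, error) {
	kvStore, err := pebble.NewKVStore("") // in-memory store
	if err != nil {
		return nil, err
	}

	trie := smt.NewSparseMerkleSumTrie(kvStore, protocol.NewTrieHasher(), smt.WithValueHasher(nil))

	// Insert a few dummy leaves so the closest proof has something to prove.
	for i := 0; i < 4; i++ {
		key := make([]byte, 32)
		if _, err = rand.Read(key); err != nil {
			return nil, err
		}
		if err = trie.Update(key, []byte("relay-bytes"), 1); err != nil {
			return nil, err
		}
	}

	// Generate the regular closest proof, then compact and marshal it.
	proof, err := trie.ProveClosest(path)
	if err != nil {
		return nil, err
	}
	compactProof, err := smt.CompactClosestProof(proof, &trie.TrieSpec)
	if err != nil {
		return nil, err
	}
	return compactProof.Marshal()
}
```

The on-chain side reverses these steps (unmarshal, decompact, then read the closest value), as shown in proof_validation.go further below.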
+ numIterations = 100 +) + +// No significant performance gains were observed when using compact proofs compared +// to non-compact proofs. +// In fact, compact proofs appear to be less efficient than gzipped proofs, even +// without considering the "proof closest value" compression. +// For a sample comparison between compression and compaction ratios, see: +// https://github.com/pokt-network/poktroll/pull/823#issuecomment-2363987920 +func TestSessionTree_CompactProofsAreSmallerThanNonCompactProofs(t *testing.T) { + // Run the test for different number of leaves. + for numLeafs := 10; numLeafs <= maxLeafs; numLeafs *= 10 { + cumulativeProofSize := 0 + cumulativeCompactProofSize := 0 + cumulativeGzippedProofSize := 0 + // We run the test numIterations times for each number of leaves to remove the randomness bias. + for iteration := 0; iteration <= numIterations; iteration++ { + kvStore, err := pebble.NewKVStore("") + require.NoError(t, err) + + trie := smt.NewSparseMerkleSumTrie(kvStore, protocol.NewTrieHasher(), smt.WithValueHasher(nil)) + + // Insert numLeaf random leaves. + for i := 0; i < numLeafs; i++ { + key := make([]byte, 32) + _, err = rand.Read(key) + require.NoError(t, err) + // Insert an empty value since this does not get affected by the compaction, + // this is also to not favor proof compression that compresses the value too. + trie.Update(key, []byte{}, 1) + } + + // Generate a random path. + var path = make([]byte, 32) + _, err = rand.Read(path) + require.NoError(t, err) + + // Create the proof. + proof, err := trie.ProveClosest(path) + require.NoError(t, err) + + proofBz, err := proof.Marshal() + require.NoError(t, err) + + // Accumulate the proof size over numIterations runs. + cumulativeProofSize += len(proofBz) + + // Generate the compacted proof. + compactProof, err := smt.CompactClosestProof(proof, &trie.TrieSpec) + require.NoError(t, err) + + compactProofBz, err := compactProof.Marshal() + require.NoError(t, err) + + // Accumulate the compact proof size over numIterations runs. + cumulativeCompactProofSize += len(compactProofBz) + + // Gzip the non compacted proof. + var buf bytes.Buffer + gzipWriter := gzip.NewWriter(&buf) + _, err = gzipWriter.Write(proofBz) + require.NoError(t, err) + err = gzipWriter.Close() + require.NoError(t, err) + + // Accumulate the gzipped proof size over numIterations runs. + cumulativeGzippedProofSize += len(buf.Bytes()) + } + + // Calculate how much more efficient compact SMT proofs are compared to non-compact proofs. + compactionRatio := float32(cumulativeProofSize) / float32(cumulativeCompactProofSize) + + // Claculate how much more efficient gzipped proofs are compared to non-compact proofs. + compressionRatio := float32(cumulativeProofSize) / float32(cumulativeGzippedProofSize) + + // Gzip compression is more efficient than SMT compaction. 
+ require.Greater(t, compressionRatio, compactionRatio) + + t.Logf( + "numLeaf=%d: compactionRatio: %f, compressionRatio: %f", + numLeafs, compactionRatio, compressionRatio, + ) + } +} diff --git a/proto/poktroll/proof/tx.proto b/proto/poktroll/proof/tx.proto index 6ac2a595e..4264f0daa 100644 --- a/proto/poktroll/proof/tx.proto +++ b/proto/poktroll/proof/tx.proto @@ -91,7 +91,7 @@ message MsgSubmitProof { string supplier_operator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; poktroll.session.SessionHeader session_header = 2; - // serialized version of *smt.SparseMerkleClosestProof + // serialized version of *smt.SparseCompactMerkleClosestProof bytes proof = 3; } diff --git a/proto/poktroll/proof/types.proto b/proto/poktroll/proof/types.proto index f968e4cb8..5a67772e2 100644 --- a/proto/poktroll/proof/types.proto +++ b/proto/poktroll/proof/types.proto @@ -18,7 +18,7 @@ message Proof { string supplier_operator_address = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; // The session header of the session that this claim is for. poktroll.session.SessionHeader session_header = 2; - // The serialized SMST proof from the `#ClosestProof()` method. + // The serialized SMST compacted proof from the `#ClosestProof()` method. bytes closest_merkle_proof = 3; } diff --git a/tests/integration/tokenomics/relay_mining_integration_test.go b/tests/integration/tokenomics/relay_mining_integration_test.go index 4def06029..1771b8a32 100644 --- a/tests/integration/tokenomics/relay_mining_integration_test.go +++ b/tests/integration/tokenomics/relay_mining_integration_test.go @@ -1,5 +1,3 @@ -//go:build integration - package integration_test import ( @@ -15,7 +13,6 @@ import ( "github.com/pokt-network/poktroll/app/volatile" "github.com/pokt-network/poktroll/pkg/crypto/protocol" "github.com/pokt-network/poktroll/testutil/integration" - "github.com/pokt-network/poktroll/testutil/integration/suites" "github.com/pokt-network/poktroll/testutil/testrelayer" apptypes "github.com/pokt-network/poktroll/x/application/types" prooftypes "github.com/pokt-network/poktroll/x/proof/types" @@ -28,48 +25,52 @@ var ( // Test params. computeUnitsToTokensMultiplier = uint64(1) // keeping the math simple proofRequirementThreshold = sdk.NewInt64Coin(volatile.DenomuPOKT, 1e18) - //serviceComputeUnitsPerRelay = uint64(1) // keeping the math simple ) type RelayMiningIntegrationTestSuite struct { - suites.UpdateParamsSuite + // TODO_BETA(#826): wait for integration app & suites refactor to be merged. + // Once suites.ParamsSuite is avialable, embed it here. In the meantime, we + // MUST embed suite.Suite to avoid compilation errors. + // + // suites.ParamsSuite + suite.Suite } func (s *RelayMiningIntegrationTestSuite) SetupTest() { // Construct a fresh integration app for each test. - // TODO_BLOCKED(#826): wait for integration app & suites refactor to be merged. - //s.NewApp(s.T()) - //s.SetupTestAccounts() - //s.SetupTestAuthzGrants() + // TODO_BETA(#826): wait for integration app & suites refactor to be merged. + // s.NewApp(s.T()) + // s.SetupTestAccounts() + // s.SetupTestAuthzGrants() } func (s *RelayMiningIntegrationTestSuite) TestComputeNewDifficultyHash_RewardsReflectWorkCompleted() { // Set the shared module param compute_units_to_tokens_multiplier. - // TODO_BLOCKED(#826): wait for integration app & suites refactor to be merged. 
- //_, err := s.RunUpdateParam(s.T(), - // sharedtypes.ModuleName, - // sharedtypes.ParamComputeUnitsToTokensMultiplier, - // computeUnitsToTokensMultiplier, - //) - //require.NoError(s.T(), err) + // TODO_BETA(#826): wait for integration app & suites refactor to be merged. + // _, err := s.RunUpdateParam(s.T(), + // sharedtypes.ModuleName, + // sharedtypes.ParamComputeUnitsToTokensMultiplier, + // computeUnitsToTokensMultiplier, + // ) + // require.NoError(s.T(), err) // Set the proof params so we never need a proof (for simplicity of this test) - // TODO_BLOCKED(#826): wait for integration app & suites refactor to be merged. - //_, err = s.RunUpdateParam(s.T(), - // prooftypes.ModuleName, - // prooftypes.ParamProofRequestProbability, - // float32(0), - //) - //require.NoError(s.T(), err) + // TODO_BETA(#826): wait for integration app & suites refactor to be merged. + // _, err = s.RunUpdateParam(s.T(), + // prooftypes.ModuleName, + // prooftypes.ParamProofRequestProbability, + // float32(0), + // ) + // require.NoError(s.T(), err) // Set the proof requirement threshold to be VERY high. - // TODO_BLOCKED(#826): wait for integration app & suites refactor to be merged. - //_, err = s.RunUpdateParam(s.T(), - // prooftypes.ModuleName, - // prooftypes.ParamProofRequirementThreshold, - // &proofRequirementThreshold, - //) - //require.NoError(s.T(), err) + // TODO_BETA(#826): wait for integration app & suites refactor to be merged. + // _, err = s.RunUpdateParam(s.T(), + // prooftypes.ModuleName, + // prooftypes.ParamProofRequirementThreshold, + // &proofRequirementThreshold, + // ) + // require.NoError(s.T(), err) // TODO(@red-0ne, #781): Implement this test after the business logic is done. @@ -183,6 +184,6 @@ func prepareRealClaim( } func TestRelayMiningIntegrationSuite(t *testing.T) { - t.Skip("TODO_BLOCKED(#826): wait for integration app & suites refactor to be merged.") + t.Skip("TODO_BETA(#826): wait for integration app & suites refactor to be merged.") suite.Run(t, new(RelayMiningIntegrationTestSuite)) } diff --git a/testutil/integration/app.go b/testutil/integration/app.go index 3c5ac9d5d..fc7f9681f 100644 --- a/testutil/integration/app.go +++ b/testutil/integration/app.go @@ -84,21 +84,11 @@ import ( const appName = "poktroll-integration-app" var ( - // FaucetAddrStr is a random address which is funded with FaucetAmountUpokt - // coins such that it can be used as a faucet for integration tests. - FaucetAddrStr = sample.AccAddress() - // FaucetAmountUpokt is the number of upokt coins that the faucet account + // faucetAmountUpokt is the number of upokt coins that the faucet account // is funded with. - FaucetAmountUpokt = int64(math2.MaxInt64) + faucetAmountUpokt = int64(math2.MaxInt64) ) -// defaultIntegrationAppOptionFn is the default integration module function for the -// integration app. It ensures that the bank module genesis state includes the faucet -// account with a large balance. -func defaultIntegrationAppOptionFn(cfg *IntegrationAppConfig) { - WithInitChainerModuleFn(newFaucetInitChainerFn(FaucetAddrStr, FaucetAmountUpokt))(cfg) -} - // App is a test application that can be used to test the behaviour when none // of the modules are mocked and their integration (cross module interaction) // needs to be validated. @@ -117,6 +107,11 @@ type App struct { ringClient crypto.RingClient preGeneratedAccts *testkeyring.PreGeneratedAccountIterator + // faucetBech32 is a random address which is selected as the primary faucet + // to fund other accounts. 
It is funded with faucetAmountUpokt coins such that + // it can be used as a faucet for integration tests. + faucetBech32 string + // Some default helper fixtures for general testing. // They're publicly exposed and should/could be improved and expand on // over time. @@ -148,8 +143,14 @@ func NewIntegrationApp( ) *App { t.Helper() + // Prepare the faucet init-chainer module option function. It ensures that the + // bank module genesis state includes the faucet account with a large balance. + faucetBech32 := sample.AccAddress() + faucetInitChainerFn := newFaucetInitChainerFn(faucetBech32, faucetAmountUpokt) + initChainerModuleOptFn := WithInitChainerModuleFn(faucetInitChainerFn) + cfg := &IntegrationAppConfig{} - opts = append(opts, defaultIntegrationAppOptionFn) + opts = append(opts, initChainerModuleOptFn) for _, opt := range opts { opt(cfg) } @@ -673,10 +674,15 @@ func (app *App) RunMsg(t *testing.T, msg sdk.Msg) (tx.MsgResponse, error) { return txMsgRes[0], err } +// GetFaucetBech32 returns the faucet address used by the application. +func (app *App) GetFaucetBech32() string { + return app.faucetBech32 +} + // RunMsgs provides the ability to process messages by packing them into a tx and // driving the ABCI through block finalization. It returns a slice of tx.MsgResponse -// (any) whose elements correspond to the request message of the same index. These -// responses can be type asserted to the expected response type. +// (any) whose elements correspond to the request message of the same index. +// These responses can be type asserted to the expected response type. // If execution for ANY message fails, ALL failing messages' errors are joined and // returned. In order to run a message, the application must have a handler for it. // These handlers are registered on the application message service router. @@ -851,7 +857,7 @@ func (app *App) nextBlockUpdateCtx() { // on-chain actors for use in tests. In creates a service, and stakes a supplier // and application as well as funding the bank balance of the default supplier. // -// TODO_IMPROVE: Eliminate usage of and remove this function in favor of +// TODO_TECHDEBT(@bryanchriswhite): Eliminate usage of and remove this function in favor of // integration.NewInitChainerModuleGenesisStateOptionFn. 
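Because the faucet address is now generated per App instance rather than exposed as the package-level FaucetAddrStr, tests reach it through GetFaucetBech32(). A hedged sketch of funding an arbitrary account that way, mirroring what BaseIntegrationSuite.FundAddress does below (fundFromFaucet is a hypothetical helper):

```go
package example

import (
	"testing"

	cosmostypes "github.com/cosmos/cosmos-sdk/types"
	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
	"github.com/stretchr/testify/require"

	"github.com/pokt-network/poktroll/app/volatile"
	"github.com/pokt-network/poktroll/testutil/integration"
)

// fundFromFaucet sends amtUpokt uPOKT from the app's faucet account to the given address.
func fundFromFaucet(t *testing.T, app *integration.App, to cosmostypes.AccAddress, amtUpokt int64) {
	sendMsg := &banktypes.MsgSend{
		FromAddress: app.GetFaucetBech32(),
		ToAddress:   to.String(),
		Amount:      cosmostypes.NewCoins(cosmostypes.NewInt64Coin(volatile.DenomuPOKT, amtUpokt)),
	}

	// RunMsg packs the message into a tx and drives the ABCI through block finalization.
	_, err := app.RunMsg(t, sendMsg)
	require.NoError(t, err)
}
```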
func (app *App) setupDefaultActorsState( t *testing.T, diff --git a/testutil/integration/suites/base.go b/testutil/integration/suites/base.go index 8af7e5e24..491cae620 100644 --- a/testutil/integration/suites/base.go +++ b/testutil/integration/suites/base.go @@ -1,5 +1,3 @@ -//go:build integration - package suites import ( @@ -85,7 +83,7 @@ func (s *BaseIntegrationSuite) FundAddress( ) { coinUpokt := cosmostypes.NewInt64Coin(volatile.DenomuPOKT, amountUpokt) sendMsg := &banktypes.MsgSend{ - FromAddress: integration.FaucetAddrStr, + FromAddress: s.GetApp().GetFaucetBech32(), ToAddress: addr.String(), Amount: cosmostypes.NewCoins(coinUpokt), } diff --git a/testutil/integration/suites/base_test.go b/testutil/integration/suites/base_test.go index caa2f3fc0..39b0b6872 100644 --- a/testutil/integration/suites/base_test.go +++ b/testutil/integration/suites/base_test.go @@ -1,5 +1,3 @@ -//go:build integration - package suites import ( @@ -12,23 +10,22 @@ import ( "github.com/pokt-network/poktroll/app/volatile" "github.com/pokt-network/poktroll/testutil/events" - "github.com/pokt-network/poktroll/testutil/integration" "github.com/pokt-network/poktroll/testutil/sample" gatewaytypes "github.com/pokt-network/poktroll/x/gateway/types" ) -var gatewayStakeAmount = int64(1000) - -type BaseIntegrationSuiteTestSuite struct { +// baseIntegrationSuiteTestSuite is a test suite which embeds BaseIntegrationSuite. +// **in order to test it**. It is intended to be embedded in other test suites. +type baseIntegrationSuiteTestSuite struct { BaseIntegrationSuite } -func (s *BaseIntegrationSuite) SetupTest() { +func (s *baseIntegrationSuiteTestSuite) SetupTest() { // Reset app to nil before each test. s.app = nil } -func (s *BaseIntegrationSuiteTestSuite) TestGetApp_PanicsIfNil() { +func (s *baseIntegrationSuiteTestSuite) TestGetApp_PanicsIfNil() { require.Nil(s.T(), s.app) // Expect the call to GetApp() to panic, defer recovery to check. @@ -46,19 +43,19 @@ func (s *BaseIntegrationSuiteTestSuite) TestGetApp_PanicsIfNil() { s.GetApp() } -func (s *BaseIntegrationSuiteTestSuite) TestNewApp() { +func (s *baseIntegrationSuiteTestSuite) TestNewApp() { require.Nil(s.T(), s.app) app := s.NewApp(s.T()) require.Same(s.T(), app, s.app) } -func (s *BaseIntegrationSuiteTestSuite) TestGetApp_ReturnsApp() { +func (s *baseIntegrationSuiteTestSuite) TestGetApp_ReturnsApp() { app := s.NewApp(s.T()) require.Same(s.T(), app, s.GetApp()) } -func (s *BaseIntegrationSuiteTestSuite) TestSetApp() { +func (s *baseIntegrationSuiteTestSuite) TestSetApp() { // Construct an app. 
app := s.NewApp(s.T()) @@ -69,19 +66,19 @@ func (s *BaseIntegrationSuiteTestSuite) TestSetApp() { require.Same(s.T(), app, s.app) } -func (s *BaseIntegrationSuiteTestSuite) TestGetPoktrollModuleNames() { +func (s *baseIntegrationSuiteTestSuite) TestGetPoktrollModuleNames() { moduleNames := s.GetPoktrollModuleNames() require.Greater(s.T(), len(moduleNames), 0, "expected non-empty module names") require.ElementsMatch(s.T(), s.poktrollModuleNames, moduleNames) } -func (s *BaseIntegrationSuiteTestSuite) TestGetCosmosModuleNames() { +func (s *baseIntegrationSuiteTestSuite) TestGetCosmosModuleNames() { moduleNames := s.GetCosmosModuleNames() require.Greater(s.T(), len(moduleNames), 0, "expected non-empty module names") require.ElementsMatch(s.T(), s.cosmosModuleNames, moduleNames) } -func (s *BaseIntegrationSuiteTestSuite) TestSdkCtx() { +func (s *baseIntegrationSuiteTestSuite) TestSdkCtx() { s.NewApp(s.T()) sdkCtx := s.SdkCtx() @@ -89,7 +86,7 @@ func (s *BaseIntegrationSuiteTestSuite) TestSdkCtx() { require.Greater(s.T(), sdkCtx.BlockHeight(), int64(0)) } -func (s *BaseIntegrationSuiteTestSuite) TestFundAddressAndGetBankQueryClient() { +func (s *baseIntegrationSuiteTestSuite) TestFundAddressAndGetBankQueryClient() { s.NewApp(s.T()) fundAmount := int64(1000) fundAddr, err := cosmostypes.AccAddressFromBech32(sample.AccAddress()) @@ -116,7 +113,7 @@ func (s *BaseIntegrationSuiteTestSuite) TestFundAddressAndGetBankQueryClient() { require.Equal(s.T(), fundAmount, balRes.GetBalance().Amount.Int64()) } -func (s *BaseIntegrationSuiteTestSuite) TestFilterLatestEventsWithNewMsgEventMatchFn() { +func (s *baseIntegrationSuiteTestSuite) TestFilterLatestEventsWithNewMsgEventMatchFn() { expectedNumEvents := 3 s.NewApp(s.T()) @@ -134,7 +131,7 @@ func (s *BaseIntegrationSuiteTestSuite) TestFilterLatestEventsWithNewMsgEventMat require.Equal(s.T(), 0, len(s.SdkCtx().EventManager().Events()), "expected no events in the next block") } -func (s *BaseIntegrationSuiteTestSuite) TestFilterLatestEventsWithNewEventTypeMatchFn() { +func (s *baseIntegrationSuiteTestSuite) TestFilterLatestEventsWithNewEventTypeMatchFn() { expectedNumEvents := 3 s.NewApp(s.T()) @@ -155,7 +152,7 @@ func (s *BaseIntegrationSuiteTestSuite) TestFilterLatestEventsWithNewEventTypeMa require.Equal(s.T(), 0, len(s.SdkCtx().EventManager().Events()), "expected no events in the next block") } -func (s *BaseIntegrationSuiteTestSuite) TestGetAttributeValue() { +func (s *baseIntegrationSuiteTestSuite) TestGetAttributeValue() { s.NewApp(s.T()) s.emitBankMsgSendEvents(1) @@ -179,11 +176,11 @@ func (s *BaseIntegrationSuiteTestSuite) TestGetAttributeValue() { // emitBankMsgSendEvents causes the bank module to emit events as the result // of handling a MsgSend message which are intended to be used to make assertions // in tests. -func (s *BaseIntegrationSuiteTestSuite) emitBankMsgSendEvents(expectedNumEvents int) { +func (s *baseIntegrationSuiteTestSuite) emitBankMsgSendEvents(expectedNumEvents int) { msgs := make([]cosmostypes.Msg, 0) for i := 0; i < expectedNumEvents; i++ { - faucetAddr, err := cosmostypes.AccAddressFromBech32(integration.FaucetAddrStr) + faucetAddr, err := cosmostypes.AccAddressFromBech32(s.GetApp().GetFaucetBech32()) require.NoError(s.T(), err) randomAddr, err := cosmostypes.AccAddressFromBech32(sample.AccAddress()) @@ -204,7 +201,7 @@ func (s *BaseIntegrationSuiteTestSuite) emitBankMsgSendEvents(expectedNumEvents // emitPoktrollGatewayUnstakedEvents emits the given number of EventGatewayUnstaked // events to the event manager. 
These events are intended to be used to make // assertions in tests. -func (s *BaseIntegrationSuiteTestSuite) emitPoktrollGatewayUnstakedEvents(expectedNumEvents int) { +func (s *baseIntegrationSuiteTestSuite) emitPoktrollGatewayUnstakedEvents(expectedNumEvents int) { for i := 0; i < expectedNumEvents; i++ { err := s.SdkCtx().EventManager().EmitTypedEvent(&gatewaytypes.EventGatewayUnstaked{ Address: sample.AccAddress(), @@ -215,5 +212,5 @@ func (s *BaseIntegrationSuiteTestSuite) emitPoktrollGatewayUnstakedEvents(expect // Run the test suite. func TestBaseIntegrationSuite(t *testing.T) { - suite.Run(t, new(BaseIntegrationSuiteTestSuite)) + suite.Run(t, new(baseIntegrationSuiteTestSuite)) } diff --git a/testutil/integration/suites/interface.go b/testutil/integration/suites/interface.go index c09b2cf28..d88246a90 100644 --- a/testutil/integration/suites/interface.go +++ b/testutil/integration/suites/interface.go @@ -1,5 +1,3 @@ -//go:build integration - package suites import ( @@ -35,8 +33,9 @@ type IntegrationSuite interface { // of the integration app. GetBankQueryClient() banktypes.QueryClient - // FilterLatestEvents returns the most recent events in the event manager that - // match the given matchFn. + // FilterEvents returns the events from the event manager which match the given + // matchFn. Events are returned in reverse order, i.e. the most recent event is + // first. FilterEvents(matchFn func(*cosmostypes.Event) bool) []*cosmostypes.Event // LatestMatchingEvent returns the most recent event in the event manager that // matches the given matchFn. diff --git a/testutil/sample/sample.go b/testutil/sample/sample.go index e29c328a1..1c533a4dd 100644 --- a/testutil/sample/sample.go +++ b/testutil/sample/sample.go @@ -31,7 +31,9 @@ func AccAddressAndPubKeyEd25519() (string, cryptotypes.PubKey) { } // ValAddress returns a sample validator address, which has the prefix -// of validators when converted to bech32. +// of validators when converted to bech32. Validator addresses identify +// the validator operator on-chain account and are derived using secp256k1. +// See: https://docs.cosmos.network/main/learn/beginner/accounts#addresses func ValAddress() string { _, pk := AccAddressAndPubKey() validatorAddress := tmhash.SumTruncated(pk.Address()) @@ -40,9 +42,11 @@ func ValAddress() string { } // ConsAddress returns a sample consensus node address, which has the prefix -// of consensus nodes when converted to bech32. +// of consensus nodes when converted to bech32. Consensus addresses identify +// the validator node in the consensus engine and are derived using ed25519. 
+// See: https://docs.cosmos.network/main/learn/beginner/accounts#addresses func ConsAddress() string { - _, pk := AccAddressAndPubKey() + _, pk := AccAddressAndPubKeyEd25519() consensusAddress := tmhash.SumTruncated(pk.Address()) valAddress := sdk.ConsAddress(consensusAddress) return valAddress.String() diff --git a/testutil/testclient/testqueryclients/bankquerier.go b/testutil/testclient/testqueryclients/bankquerier.go new file mode 100644 index 000000000..cd47a900a --- /dev/null +++ b/testutil/testclient/testqueryclients/bankquerier.go @@ -0,0 +1,25 @@ +package testqueryclients + +import ( + "testing" + + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/golang/mock/gomock" + + "github.com/pokt-network/poktroll/app/volatile" + "github.com/pokt-network/poktroll/testutil/mockclient" +) + +// NewTestBankQueryClientWithBalance creates a mock of the BankQueryClient that +// uses the provided balance for its GetBalance() method implementation. +func NewTestBankQueryClientWithBalance(t *testing.T, balance int64) *mockclient.MockBankQueryClient { + ctrl := gomock.NewController(t) + bankQueryClientMock := mockclient.NewMockBankQueryClient(ctrl) + bankQueryClientMock.EXPECT(). + GetBalance(gomock.Any(), gomock.Any()). + Return(&sdk.Coin{Denom: volatile.DenomuPOKT, Amount: math.NewInt(balance)}, nil). + AnyTimes() + + return bankQueryClientMock +} diff --git a/testutil/testtree/tree.go b/testutil/testtree/tree.go index 5774de7c8..6743d753f 100644 --- a/testutil/testtree/tree.go +++ b/testutil/testtree/tree.go @@ -121,18 +121,18 @@ func NewProof( t.Helper() // Generate a closest proof from the session tree using closestProofPath. - merkleProof, err := sessionTree.ProveClosest(closestProofPath) + merkleCompactProof, err := sessionTree.ProveClosest(closestProofPath) require.NoError(t, err) - require.NotNil(t, merkleProof) + require.NotNil(t, merkleCompactProof) // Serialize the closest merkle proof. - merkleProofBz, err := merkleProof.Marshal() + merkleCompactProofBz, err := merkleCompactProof.Marshal() require.NoError(t, err) return &prooftypes.Proof{ SupplierOperatorAddress: supplierOperatorAddr, SessionHeader: sessionHeader, - ClosestMerkleProof: merkleProofBz, + ClosestMerkleProof: merkleCompactProofBz, } } diff --git a/x/proof/keeper/proof.go b/x/proof/keeper/proof.go index 1309920a0..915803199 100644 --- a/x/proof/keeper/proof.go +++ b/x/proof/keeper/proof.go @@ -15,8 +15,6 @@ import ( func (k Keeper) UpsertProof(ctx context.Context, proof types.Proof) { logger := k.Logger().With("method", "UpsertProof") - // TODO_MAINNET(#427): Use the marshal method on the SparseCompactClosestProof - // type here instead in order to reduce space stored on chain. proofBz := k.cdc.MustMarshal(&proof) storeAdapter := runtime.KVStoreAdapter(k.storeService.OpenKVStore(ctx)) diff --git a/x/proof/keeper/proof_validation.go b/x/proof/keeper/proof_validation.go index 638e01d0a..f771ca626 100644 --- a/x/proof/keeper/proof_validation.go +++ b/x/proof/keeper/proof_validation.go @@ -91,16 +91,23 @@ func (k Keeper) EnsureValidProof( } // Unmarshal the closest merkle proof from the message.
- sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{} - if err = sparseMerkleClosestProof.Unmarshal(proof.ClosestMerkleProof); err != nil { + sparseCompactMerkleClosestProof := &smt.SparseCompactMerkleClosestProof{} + if err = sparseCompactMerkleClosestProof.Unmarshal(proof.ClosestMerkleProof); err != nil { return types.ErrProofInvalidProof.Wrapf( "failed to unmarshal closest merkle proof: %s", err, ) } - // TODO_MAINNET(#427): Utilize smt.VerifyCompactClosestProof here to - // reduce on-chain storage requirements for proofs. + // SparseCompactMerkleClosestProof does not implement GetValueHash, so we need to decompact it. + sparseMerkleClosestProof, err := smt.DecompactClosestProof(sparseCompactMerkleClosestProof, &protocol.SmtSpec) + if err != nil { + return types.ErrProofInvalidProof.Wrapf( + "failed to decompact closest merkle proof: %s", + err, + ) + } + // Get the relay request and response from the proof.GetClosestMerkleProof. relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) relay := &servicetypes.Relay{} diff --git a/x/proof/keeper/proof_validation_test.go b/x/proof/keeper/proof_validation_test.go index 7c7ba667f..13222338d 100644 --- a/x/proof/keeper/proof_validation_test.go +++ b/x/proof/keeper/proof_validation_test.go @@ -156,8 +156,8 @@ func TestEnsureValidProof_Error(t *testing.T) { // Store the expected error returned during deserialization of the invalid // closest Merkle proof bytes. - sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{} - expectedInvalidProofUnmarshalErr := sparseMerkleClosestProof.Unmarshal(invalidClosestProofBytes) + sparseCompactMerkleClosestProof := &smt.SparseCompactMerkleClosestProof{} + expectedInvalidProofUnmarshalErr := sparseCompactMerkleClosestProof.Unmarshal(invalidClosestProofBytes) // Construct a relay to be mangled such that it fails to deserialize in order // to set the error expectation for the relevant test case.
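For context on the x/proof/keeper/proof_validation.go hunk above: the on-chain ClosestMerkleProof bytes are now a compacted closest proof, so verification first unmarshals the compact form and then decompacts it before the value hash (the serialized relay) can be read. The sketch below is illustrative only and not part of this diff; the helper name and the explicit *smt.TrieSpec parameter are assumptions, while the smt calls mirror the ones added in the hunk.

package proofsketch

import (
	"github.com/pokt-network/smt"
)

// relayBytesFromCompactProof is a hypothetical helper mirroring the flow added
// to EnsureValidProof: unmarshal the compact proof, decompact it, then read the
// value hash (the serialized relay) from the decompacted proof.
func relayBytesFromCompactProof(closestMerkleProofBz []byte, spec *smt.TrieSpec) ([]byte, error) {
	compactProof := &smt.SparseCompactMerkleClosestProof{}
	if err := compactProof.Unmarshal(closestMerkleProofBz); err != nil {
		return nil, err
	}

	// SparseCompactMerkleClosestProof does not expose GetValueHash, so it must
	// be decompacted before the relay bytes can be extracted.
	closestProof, err := smt.DecompactClosestProof(compactProof, spec)
	if err != nil {
		return nil, err
	}

	return closestProof.GetValueHash(spec), nil
}

In the keeper the spec argument is &protocol.SmtSpec, as shown in the hunk above.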
@@ -611,9 +611,12 @@ func TestEnsureValidProof_Error(t *testing.T) { ) // Extract relayHash to check below that it's difficulty is insufficient - sparseMerkleClosestProof := &smt.SparseMerkleClosestProof{} - err = sparseMerkleClosestProof.Unmarshal(proof.ClosestMerkleProof) + err = sparseCompactMerkleClosestProof.Unmarshal(proof.ClosestMerkleProof) require.NoError(t, err) + var sparseMerkleClosestProof *smt.SparseMerkleClosestProof + sparseMerkleClosestProof, err = smt.DecompactClosestProof(sparseCompactMerkleClosestProof, &protocol.SmtSpec) + require.NoError(t, err) + relayBz := sparseMerkleClosestProof.GetValueHash(&protocol.SmtSpec) relayHashArr := protocol.GetRelayHashFromBytes(relayBz) relayHash := relayHashArr[:] diff --git a/x/proof/types/tx.pb.go b/x/proof/types/tx.pb.go index a98755961..8d5376a58 100644 --- a/x/proof/types/tx.pb.go +++ b/x/proof/types/tx.pb.go @@ -403,7 +403,7 @@ func (m *MsgCreateClaimResponse) GetClaim() *Claim { type MsgSubmitProof struct { SupplierOperatorAddress string `protobuf:"bytes,1,opt,name=supplier_operator_address,json=supplierOperatorAddress,proto3" json:"supplier_operator_address,omitempty"` SessionHeader *types1.SessionHeader `protobuf:"bytes,2,opt,name=session_header,json=sessionHeader,proto3" json:"session_header,omitempty"` - // serialized version of *smt.SparseMerkleClosestProof + // serialized version of *smt.SparseCompactMerkleClosestProof Proof []byte `protobuf:"bytes,3,opt,name=proof,proto3" json:"proof,omitempty"` } diff --git a/x/proof/types/types.pb.go b/x/proof/types/types.pb.go index 52230cff6..a316197dc 100644 --- a/x/proof/types/types.pb.go +++ b/x/proof/types/types.pb.go @@ -89,7 +89,7 @@ type Proof struct { SupplierOperatorAddress string `protobuf:"bytes,1,opt,name=supplier_operator_address,json=supplierOperatorAddress,proto3" json:"supplier_operator_address,omitempty"` // The session header of the session that this claim is for. SessionHeader *types.SessionHeader `protobuf:"bytes,2,opt,name=session_header,json=sessionHeader,proto3" json:"session_header,omitempty"` - // The serialized SMST proof from the `#ClosestProof()` method. + // The serialized SMST compacted proof from the `#ClosestProof()` method. ClosestMerkleProof []byte `protobuf:"bytes,3,opt,name=closest_merkle_proof,json=closestMerkleProof,proto3" json:"closest_merkle_proof,omitempty"` } diff --git a/x/tokenomics/keeper/token_logic_modules_test.go b/x/tokenomics/keeper/token_logic_modules_test.go index 6931616f0..d90691bd6 100644 --- a/x/tokenomics/keeper/token_logic_modules_test.go +++ b/x/tokenomics/keeper/token_logic_modules_test.go @@ -322,10 +322,10 @@ func TestProcessTokenLogicModules_TLMGlobalMint_Valid_MintDistributionCorrect(t service := prepareTestService(serviceComputeUnitsPerRelay) numRelays := uint64(1000) // By supplier for application in this session numTokensClaimed := float64(numRelays * serviceComputeUnitsPerRelay * globalComputeUnitsToTokensMultiplier) - validatorConsAddr := sample.ValAddress() + validatorAddr := sample.ValAddress() // Prepare the keepers - keepers, ctx := testkeeper.NewTokenomicsModuleKeepers(t, nil, testkeeper.WithService(*service), testkeeper.WithProposerAddr(validatorConsAddr)) + keepers, ctx := testkeeper.NewTokenomicsModuleKeepers(t, nil, testkeeper.WithService(*service), testkeeper.WithProposerAddr(validatorAddr)) keepers.SetService(ctx, *service) // Set compute_units_to_tokens_multiplier to simplify expectation calculations. 
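Related to the sample.ValAddress / sample.ConsAddress doc updates earlier in this diff and to the proposer-address plumbing in this test: the Cosmos SDK account, validator-operator, and consensus address types all wrap the same 20-byte payload and differ only in their bech32 prefix, which is what makes re-encoding between them (e.g. AccAddressFromConsAddress) possible. The standalone sketch below is not part of the diff; the cometbft import path and the main() framing are assumptions, and the hashing mirrors the sample helpers rather than real key derivation.

package main

import (
	"fmt"

	"github.com/cometbft/cometbft/crypto/tmhash"
	"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

func main() {
	// Consensus addresses are derived from an ed25519 public key, as the
	// updated sample.ConsAddress helper now does.
	pk := ed25519.GenPrivKey().PubKey()
	payload := tmhash.SumTruncated(pk.Address())

	// The same bytes render with a different bech32 prefix per address type.
	fmt.Println(sdk.AccAddress(payload).String())  // account address
	fmt.Println(sdk.ValAddress(payload).String())  // validator operator address
	fmt.Println(sdk.ConsAddress(payload).String()) // consensus address
}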
@@ -370,7 +370,7 @@ func TestProcessTokenLogicModules_TLMGlobalMint_Valid_MintDistributionCorrect(t // Prepare addresses daoAddress := authtypes.NewModuleAddress(govtypes.ModuleName) appAddress := app.Address - proposerAddress := sample.AccAddressFromConsAddress(validatorConsAddr) + proposerAddress := sample.AccAddressFromConsAddress(validatorAddr) // Determine balances before inflation daoBalanceBefore := getBalance(t, ctx, keepers, daoAddress.String())
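Finally, a usage sketch for the BankQueryClient mock introduced in testutil/testclient/testqueryclients/bankquerier.go; it is not part of the diff. The package and test names are made up for illustration, while NewTestBankQueryClientWithBalance, GetBalance, and sample.AccAddress come from the code in this change set.

package examples_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/pokt-network/poktroll/testutil/sample"
	"github.com/pokt-network/poktroll/testutil/testclient/testqueryclients"
)

func TestBankQueryClientMockSketch(t *testing.T) {
	// The mock returns the same uPOKT balance for any queried address.
	bankQueryClient := testqueryclients.NewTestBankQueryClientWithBalance(t, 1000)

	balance, err := bankQueryClient.GetBalance(context.Background(), sample.AccAddress())
	require.NoError(t, err)
	require.Equal(t, int64(1000), balance.Amount.Int64())
}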