diff --git a/CHANGELOG.md b/CHANGELOG.md index d12f32df9fb..b7062777816 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ # UNRELEASED ## New features +- New ChainIndexer subsystem to index Filecoin chain state such as tipsets, messages, events and ETH transactions for accurate and faster RPC responses. The `ChainIndexer` replaces the existing `MsgIndex`, `EthTxHashLookup` and `EventIndex` implementations in Lotus, which [suffer from a multitude of known problems](https://github.com/filecoin-project/lotus/issues/12293). If you are an RPC provider or a node operator who uses or exposes Ethereum and/or events APIs, please refer to the [ChainIndexer documentation for operators](./documentation/en/chain-indexer-overview-for-operators.md) for information on how to enable, configure and use the new indexer. While there is no automated data migration and one can upgrade and downgrade without backups, there are manual steps that need to be taken to backfill data when upgrading to this Lotus version, or downgrading to the previous version without ChainIndexer. Please be aware that this feature removes some options from the Lotus configuration file; if these have been set, Lotus will report an error when starting. See the documentation for more information. - Update `EthGetBlockByNumber` to return a pointer to ethtypes.EthBlock or nil for null rounds. ([filecoin-project/lotus#12529](https://github.com/filecoin-project/lotus/pull/12529)) - Reduce size of embedded genesis CAR files by removing WASM actor blocks and compressing with zstd. This reduces the `lotus` binary size by approximately 10 MiB. ([filecoin-project/lotus#12439](https://github.com/filecoin-project/lotus/pull/12439)) - Add ChainSafe operated Calibration archival node to the bootstrap list ([filecoin-project/lotus#12517](https://github.com/filecoin-project/lotus/pull/12517)) diff --git a/api/api_full.go b/api/api_full.go index 944bfa88e28..ef2253a16e8 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -63,6 +63,46 @@ type FullNode interface { Common Net + // MethodGroup: ChainIndexer + // The ChainIndexer method group contains methods for interacting with the chain indexer. + + // ChainValidateIndex validates the integrity of and optionally backfills + // the chain index at a specific epoch. + // + // It can be used to: + // + // 1. Validate the chain index at a specific epoch: + // - Ensures consistency between indexed data and actual chain state + // - Reports any errors found during validation (e.g. the indexed data does not match the actual chain state, missing data, etc.) + // + // 2. Optionally backfill missing data: + // - Backfills data if the index is missing information for the specified epoch + // - Backfilling only occurs when the `backfill` parameter is set to `true` + // + // 3. Detect "holes" in the index: + // - If `backfill` is `false` and the index lacks data for the specified epoch, the API returns an error indicating missing data + // + // Parameters: + // - epoch: The specific chain epoch for which to validate/backfill the index. + // - backfill: A boolean flag indicating whether to attempt backfilling of missing data if the index does not have data for the + // specified epoch. + // + // Returns: + // - *types.IndexValidation: A pointer to an IndexValidation struct containing the results of the validation/backfill. + // - error: An error object if the validation/backfill fails. The error message will contain details about the index + // corruption if the call fails because of an inconsistency between indexed data and the actual chain state. + // Note: The API returns an error if the index does not have data for the specified epoch and backfill is set to false.
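+ //
+ // Example (an illustrative sketch, not part of the generated API surface; it assumes a
+ // connected FullNode client `node` and uses 10101 as a sample epoch):
+ //
+ //   validation, err := node.ChainValidateIndex(ctx, abi.ChainEpoch(10101), true)
+ //   if err != nil {
+ //       return err // index corruption, or missing data when backfill is false
+ //   }
+ //   fmt.Printf("height %d: %d messages indexed (backfilled: %t)\n", validation.Height, validation.IndexedMessagesCount, validation.Backfilled)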
+ ChainValidateIndex(ctx context.Context, epoch abi.ChainEpoch, backfill bool) (*types.IndexValidation, error) //perm:write + // MethodGroup: Chain // The Chain method group contains methods for interacting with the // blockchain, but that do not require any form of state computation. diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index 7c03b446fde..841a55b4317 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -511,6 +511,21 @@ func (mr *MockFullNodeMockRecorder) ChainTipSetWeight(arg0, arg1 interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainTipSetWeight", reflect.TypeOf((*MockFullNode)(nil).ChainTipSetWeight), arg0, arg1) } +// ChainValidateIndex mocks base method. +func (m *MockFullNode) ChainValidateIndex(arg0 context.Context, arg1 abi.ChainEpoch, arg2 bool) (*types.IndexValidation, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChainValidateIndex", arg0, arg1, arg2) + ret0, _ := ret[0].(*types.IndexValidation) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChainValidateIndex indicates an expected call of ChainValidateIndex. +func (mr *MockFullNodeMockRecorder) ChainValidateIndex(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChainValidateIndex", reflect.TypeOf((*MockFullNode)(nil).ChainValidateIndex), arg0, arg1, arg2) +} + // Closing mocks base method. func (m *MockFullNode) Closing(arg0 context.Context) (<-chan struct{}, error) { m.ctrl.T.Helper() diff --git a/api/proxy_gen.go b/api/proxy_gen.go index 37ce3c03c87..ad1d7feec87 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -170,6 +170,8 @@ type FullNodeMethods struct { ChainTipSetWeight func(p0 context.Context, p1 types.TipSetKey) (types.BigInt, error) `perm:"read"` + ChainValidateIndex func(p0 context.Context, p1 abi.ChainEpoch, p2 bool) (*types.IndexValidation, error) `perm:"write"` + CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` EthAccounts func(p0 context.Context) ([]ethtypes.EthAddress, error) `perm:"read"` @@ -1651,6 +1653,17 @@ func (s *FullNodeStub) ChainTipSetWeight(p0 context.Context, p1 types.TipSetKey) return *new(types.BigInt), ErrNotSupported } +func (s *FullNodeStruct) ChainValidateIndex(p0 context.Context, p1 abi.ChainEpoch, p2 bool) (*types.IndexValidation, error) { + if s.Internal.ChainValidateIndex == nil { + return nil, ErrNotSupported + } + return s.Internal.ChainValidateIndex(p0, p1, p2) +} + +func (s *FullNodeStub) ChainValidateIndex(p0 context.Context, p1 abi.ChainEpoch, p2 bool) (*types.IndexValidation, error) { + return nil, ErrNotSupported +} + func (s *FullNodeStruct) CreateBackup(p0 context.Context, p1 string) error { if s.Internal.CreateBackup == nil { return ErrNotSupported diff --git a/build/openrpc/full.json b/build/openrpc/full.json index fef48110f2b..45018411531 100644 --- a/build/openrpc/full.json +++ b/build/openrpc/full.json @@ -37,7 +37,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1346" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1348" } }, { @@ -60,7 +60,7 @@ "deprecated":
false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1357" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1359" } }, { @@ -103,7 +103,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1368" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1370" } }, { @@ -214,7 +214,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1390" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1392" } }, { @@ -454,7 +454,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1401" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1403" } }, { @@ -685,7 +685,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1412" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1414" } }, { @@ -784,7 +784,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1423" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1425" } }, { @@ -816,7 +816,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1434" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1436" } }, { @@ -922,7 +922,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1445" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1447" } }, { @@ -1019,7 +1019,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1456" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1458" } }, { @@ -1078,7 +1078,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1467" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1469" } }, { @@ -1171,7 +1171,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1478" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1480" } }, { @@ -1255,7 +1255,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1489" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1491" } }, { @@ -1355,7 +1355,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1500" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1502" } }, { @@ -1411,7 +1411,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1511" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1513" } }, { @@ -1484,7 +1484,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1522" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1524" } }, { @@ -1557,7 +1557,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1533" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1535" } }, { @@ -1604,7 +1604,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1544" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1546" } }, { @@ -1636,7 +1636,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1555" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1557" } }, { @@ -1691,7 +1691,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1566" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1568" } }, { @@ -1743,7 +1743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1588" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1590" } }, { @@ -1780,7 +1780,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1599" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1601" } }, { @@ -1827,7 +1827,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1610" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1612" } }, { @@ -1874,7 +1874,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1621" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1623" } }, { @@ -1954,7 +1954,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1632" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1634" } }, { @@ -2006,7 +2006,111 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1643" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1645" + } + }, + { + "name": "Filecoin.ChainValidateIndex", + "description": "```go\nfunc (s 
*FullNodeStruct) ChainValidateIndex(p0 context.Context, p1 abi.ChainEpoch, p2 bool) (*types.IndexValidation, error) {\n\tif s.Internal.ChainValidateIndex == nil {\n\t\treturn nil, ErrNotSupported\n\t}\n\treturn s.Internal.ChainValidateIndex(p0, p1, p2)\n}\n```", + "summary": "ChainValidateIndex validates the integrity of and optionally backfills\nthe chain index at a specific epoch.\n\nIt can be used to:\n\n1. Validate the chain index at a specific epoch:\n - Ensures consistency between indexed data and actual chain state\n - Reports any errors found during validation (e.g. the indexed data does not match the actual chain state, missing data, etc.)\n\n2. Optionally backfill missing data:\n - Backfills data if the index is missing information for the specified epoch\n - Backfilling only occurs when the `backfill` parameter is set to `true`\n\n3. Detect \"holes\" in the index:\n - If `backfill` is `false` and the index lacks data for the specified epoch, the API returns an error indicating missing data\n\nParameters:\n - epoch: The specific chain epoch for which to validate/backfill the index.\n - backfill: A boolean flag indicating whether to attempt backfilling of missing data if the index does not have data for the\n specified epoch.\n\nReturns:\n - *types.IndexValidation: A pointer to an IndexValidation struct containing the results of the validation/backfill.\n - error: An error object if the validation/backfill fails. The error message will contain details about the index\n corruption if the call fails because of an inconsistency between indexed data and the actual chain state.\n Note: The API returns an error if the index does not have data for the specified epoch and backfill is set to false.\n", + "paramStructure": "by-position", + "params": [ + { + "name": "p1", + "description": "abi.ChainEpoch", + "summary": "", + "schema": { + "title": "number", + "description": "Number is a number", + "examples": [ + 10101 + ], + "type": [ + "number" + ] + }, + "required": true, + "deprecated": false + }, + { + "name": "p2", + "description": "bool", + "summary": "", + "schema": { + "examples": [ + true + ], + "type": [ + "boolean" + ] + }, + "required": true, + "deprecated": false + } + ], + "result": { + "name": "*types.IndexValidation", + "description": "*types.IndexValidation", + "summary": "", + "schema": { + "examples": [ + { + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101, + "IndexedMessagesCount": 42, + "IndexedEventsCount": 42, + "IndexedEventEntriesCount": 42, + "Backfilled": true, + "IsNullRound": true + } + ], + "additionalProperties": false, + "properties": { + "Backfilled": { + "type": "boolean" + }, + "Height": { + "title": "number", + "type": "number" + }, + "IndexedEventEntriesCount": { + "title": "number", + "type": "number" + }, + "IndexedEventsCount": { + "title": "number", + "type": "number" + }, + "IndexedMessagesCount": { + "title": "number", + "type": "number" + }, + "IsNullRound": { + "type": "boolean" + }, + "TipSetKey": { + "additionalProperties": false, + "type": "object" + } + }, + "type": [ + "object" + ] + }, + "required": true, + "deprecated": false + }, + "deprecated": false, + "externalDocs": { + "description": "Github remote link", + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1656" } }, { @@ -2045,7 +2149,7 @@ "deprecated": false, "externalDocs": { "description": "Github
remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1654" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1667" } }, { @@ -2092,7 +2196,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1665" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1678" } }, { @@ -2147,7 +2251,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1676" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1689" } }, { @@ -2176,7 +2280,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1687" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1700" } }, { @@ -2313,7 +2417,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1698" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1711" } }, { @@ -2342,7 +2446,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1709" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1722" } }, { @@ -2396,7 +2500,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1720" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1733" } }, { @@ -2487,7 +2591,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1731" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1744" } }, { @@ -2515,7 +2619,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1742" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1755" } }, { @@ -2605,7 +2709,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1753" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1766" } }, { @@ -2861,7 +2965,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1764" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1777" } }, { @@ -3106,7 +3210,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1775" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1788" } }, { @@ -3382,7 +3486,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1786" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1799" } }, { @@ -3675,7 +3779,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1797" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1810" } }, { @@ -3731,7 +3835,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1808" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1821" } }, { @@ -3778,7 +3882,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1819" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1832" } }, { @@ -3876,7 +3980,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1830" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1843" } }, { @@ -3942,7 +4046,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1841" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1854" } }, { @@ -4008,7 +4112,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1852" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1865" } }, { @@ -4117,7 +4221,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1863" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1876" } }, { @@ -4175,7 +4279,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1874" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1887" } }, { @@ -4297,7 +4401,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1885" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1898" } }, { @@ -4506,7 +4610,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1896" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1909" } }, { @@ -4706,7 +4810,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1907" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1920" } }, { @@ -4898,7 +5002,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1918" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1931" } }, { @@ -5107,7 +5211,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1929" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1942" } }, { @@ -5198,7 +5302,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1940" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1953" } }, { @@ -5256,7 +5360,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1951" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1964" } }, { @@ -5514,7 +5618,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1962" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1975" } }, { @@ -5789,7 +5893,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1973" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1986" } }, { @@ -5817,7 +5921,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1984" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1997" } }, { @@ -5855,7 +5959,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L1995" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2008" } }, { @@ -5963,7 +6067,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2006" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2019" } }, { @@ -6001,7 +6105,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2017" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2030" } }, { @@ -6030,7 +6134,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2028" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2041" } }, { @@ -6093,7 +6197,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2039" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2052" } }, { @@ -6156,7 +6260,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2050" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2063" } }, { @@ -6219,7 +6323,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2061" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2074" 
} }, { @@ -6264,7 +6368,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2072" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2085" } }, { @@ -6386,7 +6490,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2083" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2096" } }, { @@ -6562,7 +6666,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2094" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2107" } }, { @@ -6717,7 +6821,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2105" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2118" } }, { @@ -6839,7 +6943,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2116" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2129" } }, { @@ -6893,7 +6997,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2127" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2140" } }, { @@ -6947,7 +7051,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2138" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2151" } }, { @@ -7132,7 +7236,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2149" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2162" } }, { @@ -7215,7 +7319,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2160" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2173" } }, { @@ -7298,7 +7402,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2171" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2184" } }, { @@ -7465,7 +7569,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2182" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2195" } }, { @@ -7670,7 +7774,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2193" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2206" } }, { @@ -7764,7 +7868,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2204" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2217" } }, { @@ -7810,7 +7914,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2215" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2228" } }, { @@ -7837,7 +7941,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2226" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2239" } }, { @@ -7892,7 +7996,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2237" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2250" } }, { @@ -7971,7 +8075,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2248" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2261" } }, { @@ -8034,7 +8138,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2259" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2272" } }, { @@ -8177,7 +8281,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2270" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2283" } }, { @@ -8304,7 +8408,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2281" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2294" } }, { @@ -8406,7 +8510,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2292" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2305" } }, { @@ -8629,7 +8733,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2303" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2316" } }, { @@ -8812,7 +8916,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2314" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2327" } }, { @@ -8892,7 +8996,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2325" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2338" } }, { @@ -8937,7 +9041,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2336" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2349" } }, { @@ 
-8993,7 +9097,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2347" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2360" } }, { @@ -9073,7 +9177,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2358" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2371" } }, { @@ -9153,7 +9257,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2369" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2382" } }, { @@ -9638,7 +9742,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2380" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2393" } }, { @@ -9832,7 +9936,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2391" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2404" } }, { @@ -9987,7 +10091,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2402" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2415" } }, { @@ -10236,7 +10340,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2413" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2426" } }, { @@ -10391,7 +10495,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2424" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2437" } }, { @@ -10568,7 +10672,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2435" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2448" } }, { @@ -10666,7 +10770,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2446" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2459" } }, { @@ -10831,7 +10935,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2457" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2470" } }, { @@ -10870,7 +10974,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2468" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2481" } }, { @@ -10935,7 +11039,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2479" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2492" } }, { @@ -10981,7 +11085,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2490" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2503" } }, { @@ -11131,7 +11235,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2501" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2514" } }, { @@ -11268,7 +11372,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2512" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2525" } }, { @@ -11499,7 +11603,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2523" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2536" } }, { @@ -11636,7 +11740,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2534" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2547" } }, { @@ -11801,7 +11905,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2545" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2558" } }, { @@ -11878,7 +11982,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2556" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2569" } }, { @@ -12073,7 +12177,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2578" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2591" } }, { @@ -12252,7 +12356,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2589" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2602" } }, { @@ -12414,7 +12518,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2600" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2613" } }, { @@ -12562,7 +12666,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2611" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2624" } }, { @@ -12790,7 +12894,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2622" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2635" } }, { @@ -12938,7 +13042,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2633" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2646" } }, { @@ -13150,7 +13254,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2644" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2657" } }, { @@ -13356,7 +13460,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2655" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2668" } }, { @@ -13424,7 +13528,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2666" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2679" } }, { @@ -13541,7 +13645,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2677" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2690" } }, { @@ -13632,7 +13736,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2688" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2701" } }, { @@ -13718,7 +13822,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2699" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2712" } }, { @@ -13913,7 +14017,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2710" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2723" } }, { @@ -14075,7 +14179,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2721" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2734" } }, { @@ -14271,7 +14375,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2732" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2745" } }, { @@ -14451,7 +14555,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2743" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2756" } }, { @@ -14614,7 +14718,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2754" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2767" } }, { @@ -14641,7 +14745,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2765" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2778" } }, { @@ -14668,7 +14772,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2776" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2789" } }, { @@ -14767,7 +14871,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2787" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2800" } }, { @@ -14813,7 +14917,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2798" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2811" } }, { @@ -14913,7 +15017,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2809" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2822" } }, { @@ -15029,7 +15133,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2820" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2833" } }, { @@ -15077,7 +15181,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2831" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2844" } }, { @@ -15169,7 +15273,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2842" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2855" } }, { @@ -15284,7 +15388,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2853" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2866" } }, { @@ -15332,7 +15436,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2864" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2877" } }, { @@ -15369,7 +15473,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2875" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2888" } }, { @@ -15641,7 +15745,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2886" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2899" } }, { @@ -15689,7 +15793,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2897" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2910" } }, { @@ -15747,7 +15851,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2908" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2921" } }, { @@ -15952,7 +16056,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2919" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2932" } }, { @@ -16155,7 +16259,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2930" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2943" } }, { @@ -16324,7 +16428,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2941" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2954" } }, { @@ -16528,7 +16632,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2952" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2965" } }, { @@ -16695,7 +16799,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2963" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2976" } }, { @@ -16902,7 +17006,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2974" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2987" } }, { @@ -16970,7 +17074,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2985" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2998" } }, { @@ -17022,7 +17126,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L2996" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3009" } }, { @@ -17071,7 +17175,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3007" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3020" } }, { @@ -17162,7 +17266,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3018" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3031" } }, { @@ -17668,7 +17772,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3029" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3042" } }, { @@ -17774,7 +17878,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3040" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3053" } }, { @@ -17826,7 +17930,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3051" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3064" } }, { @@ -18378,7 +18482,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3062" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3075" } }, { @@ -18492,7 +18596,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3073" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3086" } }, { @@ -18589,7 +18693,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3084" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3097" } }, { @@ -18689,7 +18793,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3095" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3108" } }, { @@ -18777,7 +18881,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3106" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3119" } }, { @@ -18877,7 +18981,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3117" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3130" } }, { @@ -18964,7 +19068,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3128" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3141" } }, { @@ -19055,7 +19159,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3139" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3152" } }, { @@ -19180,7 +19284,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3150" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3163" } }, { @@ -19289,7 +19393,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3161" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3174" } }, { @@ -19359,7 +19463,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3172" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3185" } }, { @@ -19462,7 +19566,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3183" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3196" } }, { @@ -19523,7 +19627,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3194" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3207" } }, { @@ -19653,7 +19757,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3205" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3218" } }, { @@ -19760,7 +19864,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3216" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3229" } }, { @@ -19979,7 +20083,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3227" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3240" } }, { @@ -20056,7 +20160,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3238" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3251" } }, { @@ -20133,7 +20237,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3249" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3262" } }, { @@ -20242,7 +20346,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3260" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3273" } }, { @@ -20351,7 +20455,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3271" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3284" } }, { @@ -20412,7 +20516,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3282" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3295" } }, { @@ -20522,7 +20626,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3293" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3306" } }, { @@ -20583,7 +20687,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3304" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3317" } }, { @@ -20651,7 +20755,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3315" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3328" } }, { @@ -20719,7 +20823,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3326" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3339" } }, { @@ -20800,7 +20904,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3337" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3350" } }, { @@ -20954,7 +21058,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3348" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3361" } }, { @@ -21026,7 +21130,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3359" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3372" } }, { @@ -21190,7 +21294,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3370" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3383" } }, { @@ -21355,7 +21459,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3381" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3394" } }, { @@ -21425,7 +21529,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3392" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3405" } }, { @@ -21493,7 +21597,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3403" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3416" } }, { @@ -21586,7 +21690,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3414" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3427" } }, { @@ -21657,7 +21761,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3425" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3438" } }, { @@ -21858,7 +21962,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3436" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3449" } }, { @@ -21990,7 +22094,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3447" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3460" } }, { @@ -22093,7 +22197,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3458" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3471" } }, { @@ -22230,7 +22334,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3469" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3482" } }, { @@ -22341,7 +22445,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3480" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3493" } }, { @@ -22473,7 +22577,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3491" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3504" } }, { @@ -22604,7 +22708,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3502" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3515" } }, { @@ -22675,7 +22779,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3513" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3526" } }, { @@ -22759,7 +22863,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3524" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3537" } }, { @@ -22845,7 +22949,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3535" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3548" } }, { @@ -23028,7 +23132,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3546" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3559" } }, { @@ -23055,7 +23159,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3557" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3570" } }, { @@ -23108,7 +23212,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3568" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3581" } }, { @@ -23196,7 +23300,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3579" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3592" } }, { @@ -23647,7 +23751,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3590" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3603" } }, { @@ -23814,7 +23918,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3601" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3614" } }, { @@ -23912,7 +24016,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3612" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3625" } }, { @@ -24085,7 +24189,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3623" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3636" } }, { @@ -24183,7 +24287,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3634" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3647" } }, { @@ -24334,7 +24438,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3645" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3658" } }, { @@ -24419,7 +24523,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3656" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3669" } }, { @@ -24487,7 +24591,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3667" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3680" } }, { @@ -24539,7 +24643,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3678" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3691" } }, { @@ -24607,7 +24711,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3689" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3702" } }, { @@ -24768,7 +24872,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3700" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3713" } }, { @@ -24815,7 +24919,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3722" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3735" } }, { @@ -24862,7 +24966,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3733" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3746" } }, { @@ -24905,7 +25009,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3755" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3768" } }, { @@ -25001,7 +25105,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3766" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3779" } }, { @@ -25267,7 +25371,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3777" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3790" } }, { @@ -25290,7 +25394,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3788" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3801" } }, { @@ -25333,7 +25437,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3799" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3812" } }, { @@ -25384,7 +25488,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3810" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3823" } }, { @@ -25429,7 +25533,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3821" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3834" } }, { @@ -25457,7 +25561,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3832" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3845" } }, { @@ -25497,7 +25601,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3843" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3856" } }, { @@ -25556,7 +25660,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3854" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3867" } }, { @@ -25600,7 +25704,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3865" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3878" } }, { @@ -25659,7 +25763,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3876" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3889" } }, { @@ -25696,7 +25800,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3887" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3900" } }, { @@ -25740,7 +25844,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3898" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3911" } }, { @@ -25780,7 +25884,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3909" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3922" } }, { @@ -25855,7 +25959,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3920" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3933" } }, { @@ -26063,7 +26167,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3931" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3944" } }, { @@ -26107,7 +26211,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3942" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3955" } }, { @@ -26197,7 +26301,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3953" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3966" } }, { @@ -26224,7 +26328,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3964" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3977" } } ] diff --git a/build/openrpc/gateway.json b/build/openrpc/gateway.json index 0fb1576a48a..1f472eefaad 100644 --- a/build/openrpc/gateway.json +++ b/build/openrpc/gateway.json @@ -242,7 +242,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3975" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3988" } }, { @@ -473,7 +473,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3986" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3999" } }, { @@ -572,7 +572,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L3997" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4010" } }, { @@ -604,7 +604,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4008" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4021" } }, { @@ -710,7 +710,7 @@ "deprecated": false, "externalDocs": { 
"description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4019" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4032" } }, { @@ -803,7 +803,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4030" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4043" } }, { @@ -887,7 +887,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4041" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4054" } }, { @@ -987,7 +987,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4052" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4065" } }, { @@ -1043,7 +1043,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4063" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4076" } }, { @@ -1116,7 +1116,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4074" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4087" } }, { @@ -1189,7 +1189,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4085" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4098" } }, { @@ -1236,7 +1236,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4096" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4109" } }, { @@ -1268,7 +1268,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4107" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4120" } }, { @@ -1305,7 +1305,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4129" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4142" } }, { @@ -1352,7 +1352,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4140" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4153" } }, { @@ -1392,7 +1392,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4151" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4164" } }, { @@ -1439,7 +1439,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4162" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4175" } }, { @@ -1494,7 +1494,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4173" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4186" } }, { @@ -1523,7 +1523,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4184" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4197" } }, { @@ -1660,7 +1660,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4195" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4208" } }, { @@ -1689,7 +1689,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4206" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4219" } }, { @@ -1743,7 +1743,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4217" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4230" } }, { @@ -1834,7 +1834,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4228" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4241" } }, { @@ -1862,7 +1862,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4239" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4252" } }, { @@ -1952,7 +1952,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4250" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4263" } }, { @@ -2208,7 +2208,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4261" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4274" } }, { @@ -2453,7 +2453,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4272" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4285" } }, { @@ -2729,7 +2729,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4283" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4296" } }, { @@ -3022,7 +3022,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4294" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4307" } }, { @@ -3078,7 +3078,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4305" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4318" } }, { @@ -3125,7 +3125,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4316" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4329" } }, { @@ -3223,7 +3223,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4327" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4340" } }, { @@ -3289,7 +3289,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4338" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4351" } }, { @@ -3355,7 +3355,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4349" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4362" } }, { @@ -3464,7 +3464,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4360" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4373" } }, { @@ -3522,7 +3522,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4371" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4384" } }, { @@ -3644,7 +3644,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4382" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4395" } }, { @@ -3836,7 +3836,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4393" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4406" } }, { @@ -4045,7 +4045,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4404" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4417" } }, { @@ -4136,7 +4136,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4415" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4428" } }, { @@ -4194,7 +4194,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4426" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4439" } }, { @@ -4452,7 +4452,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4437" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4450" 
} }, { @@ -4727,7 +4727,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4448" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4461" } }, { @@ -4755,7 +4755,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4459" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4472" } }, { @@ -4793,7 +4793,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4470" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4483" } }, { @@ -4901,7 +4901,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4481" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4494" } }, { @@ -4939,7 +4939,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4492" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4505" } }, { @@ -4968,7 +4968,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4503" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4516" } }, { @@ -5031,7 +5031,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4514" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4527" } }, { @@ -5094,7 +5094,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4525" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4538" } }, { @@ -5139,7 +5139,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4536" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4549" } }, { @@ -5261,7 +5261,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4547" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4560" } }, { @@ -5437,7 +5437,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4558" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4571" } }, { @@ -5592,7 +5592,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4569" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4582" } }, { @@ -5714,7 +5714,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4580" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4593" } }, { @@ -5768,7 +5768,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4591" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4604" } }, { @@ -5822,7 +5822,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4602" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4615" } }, { @@ -5885,7 +5885,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4613" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4626" } }, { @@ -5987,7 +5987,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4624" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4637" } }, { @@ -6210,7 +6210,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4635" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4648" } }, { @@ -6393,7 +6393,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4646" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4659" } }, { @@ -6587,7 +6587,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4657" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4670" } }, { @@ -6633,7 +6633,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4668" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4681" } }, { @@ -6783,7 +6783,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4679" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4692" } }, { @@ -6920,7 +6920,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4690" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4703" } }, { @@ -6988,7 +6988,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4701" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4714" } }, { @@ -7105,7 +7105,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4712" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4725" } }, { @@ 
-7196,7 +7196,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4723" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4736" } }, { @@ -7282,7 +7282,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4734" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4747" } }, { @@ -7309,7 +7309,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4745" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4758" } }, { @@ -7336,7 +7336,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4756" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4769" } }, { @@ -7404,7 +7404,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4767" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4780" } }, { @@ -7910,7 +7910,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4778" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4791" } }, { @@ -8007,7 +8007,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4789" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4802" } }, { @@ -8107,7 +8107,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4800" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4813" } }, { @@ -8207,7 +8207,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4811" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4824" } }, { @@ -8332,7 +8332,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4822" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4835" } }, { @@ -8441,7 +8441,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4833" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4846" } }, { @@ -8544,7 +8544,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4844" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4857" } }, { @@ -8674,7 +8674,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4855" 
+ "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4868" } }, { @@ -8781,7 +8781,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4866" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4879" } }, { @@ -8842,7 +8842,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4877" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4890" } }, { @@ -8910,7 +8910,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4888" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4901" } }, { @@ -8991,7 +8991,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4899" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4912" } }, { @@ -9155,7 +9155,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4910" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4923" } }, { @@ -9248,7 +9248,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4921" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4934" } }, { @@ -9449,7 +9449,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4932" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4945" } }, { @@ -9560,7 +9560,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4943" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4956" } }, { @@ -9691,7 +9691,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4954" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4967" } }, { @@ -9777,7 +9777,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4965" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4978" } }, { @@ -9804,7 +9804,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4976" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4989" } }, { @@ -9857,7 +9857,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4987" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5000" } }, { @@ -9945,7 +9945,7 @@ "deprecated": false, "externalDocs": { "description": "Github 
remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L4998" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5011" } }, { @@ -10396,7 +10396,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5009" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5022" } }, { @@ -10563,7 +10563,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5020" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5033" } }, { @@ -10736,7 +10736,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5031" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5044" } }, { @@ -10804,7 +10804,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5042" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5055" } }, { @@ -10872,7 +10872,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5053" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5066" } }, { @@ -11033,7 +11033,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5064" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5077" } }, { @@ -11078,7 +11078,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5086" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5099" } }, { @@ -11123,7 +11123,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5097" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5110" } }, { @@ -11150,7 +11150,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5108" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5121" } } ] diff --git a/build/openrpc/miner.json b/build/openrpc/miner.json index 1ac9768be0a..297cb18f5c6 100644 --- a/build/openrpc/miner.json +++ b/build/openrpc/miner.json @@ -30,7 +30,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5394" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5407" } }, { @@ -109,7 +109,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5405" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5418" } }, { @@ -155,7 +155,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5416" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5429" } }, { @@ -203,7 +203,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5427" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5440" } }, { @@ -251,7 +251,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5438" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5451" } }, { @@ -354,7 +354,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5449" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5462" } }, { @@ -428,7 +428,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5460" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5473" } }, { @@ -591,7 +591,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5471" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5484" } }, { @@ -742,7 +742,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5482" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5495" } }, { @@ -781,7 +781,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5493" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5506" } }, { @@ -913,7 +913,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5504" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5517" } }, { @@ -945,7 +945,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5515" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5528" } }, { @@ -986,7 +986,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5526" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5539" } }, { @@ -1054,7 +1054,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5537" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5550" } }, { @@ -1185,7 +1185,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5548" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5561" } }, { @@ -1316,7 
+1316,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5559" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5572" } }, { @@ -1416,7 +1416,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5570" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5583" } }, { @@ -1516,7 +1516,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5581" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5594" } }, { @@ -1616,7 +1616,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5592" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5605" } }, { @@ -1716,7 +1716,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5603" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5616" } }, { @@ -1816,7 +1816,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5614" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5627" } }, { @@ -1916,7 +1916,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5625" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5638" } }, { @@ -2040,7 +2040,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5636" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5649" } }, { @@ -2164,7 +2164,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5647" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5660" } }, { @@ -2279,7 +2279,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5658" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5671" } }, { @@ -2379,7 +2379,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5669" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5682" } }, { @@ -2512,7 +2512,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5680" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5693" } }, { @@ -2636,7 +2636,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5691" + 
"url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5704" } }, { @@ -2760,7 +2760,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5702" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5715" } }, { @@ -2884,7 +2884,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5713" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5726" } }, { @@ -3017,7 +3017,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5724" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5737" } }, { @@ -3117,7 +3117,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5735" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5748" } }, { @@ -3157,7 +3157,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5746" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5759" } }, { @@ -3229,7 +3229,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5757" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5770" } }, { @@ -3279,7 +3279,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5768" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5781" } }, { @@ -3323,7 +3323,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5779" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5792" } }, { @@ -3364,7 +3364,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5790" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5803" } }, { @@ -3608,7 +3608,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5801" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5814" } }, { @@ -3682,7 +3682,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5812" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5825" } }, { @@ -3732,7 +3732,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5823" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5836" } }, { @@ -3761,7 +3761,7 @@ "deprecated": false, "externalDocs": { "description": "Github 
remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5834" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5847" } }, { @@ -3790,7 +3790,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5845" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5858" } }, { @@ -3846,7 +3846,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5856" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5869" } }, { @@ -3869,7 +3869,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5867" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5880" } }, { @@ -3929,7 +3929,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5878" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5891" } }, { @@ -3968,7 +3968,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5889" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5902" } }, { @@ -4008,7 +4008,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5900" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5913" } }, { @@ -4081,7 +4081,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5911" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5924" } }, { @@ -4145,7 +4145,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5922" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5935" } }, { @@ -4208,7 +4208,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5933" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5946" } }, { @@ -4258,7 +4258,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5944" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5957" } }, { @@ -4817,7 +4817,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5955" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5968" } }, { @@ -4858,7 +4858,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5966" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5979" } }, { @@ -4899,7 +4899,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5977" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5990" } }, { @@ -4940,7 +4940,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5988" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6001" } }, { @@ -4981,7 +4981,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L5999" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6012" } }, { @@ -5022,7 +5022,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6010" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6023" } }, { @@ -5053,7 +5053,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6021" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6034" } }, { @@ -5103,7 +5103,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6032" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6045" } }, { @@ -5144,7 +5144,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6043" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6056" } }, { @@ -5183,7 +5183,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6054" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6067" } }, { @@ -5247,7 +5247,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6065" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6078" } }, { @@ -5305,7 +5305,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6076" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6089" } }, { @@ -5752,7 +5752,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6087" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6100" } }, { @@ -5788,7 +5788,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6098" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6111" } }, { @@ -5931,7 +5931,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6109" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6122" } }, { @@ -5987,7 +5987,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6120" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6133" } }, { @@ -6026,7 +6026,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6131" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6144" } }, { @@ -6203,7 +6203,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6142" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6155" } }, { @@ -6255,7 +6255,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6153" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6166" } }, { @@ -6447,7 +6447,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6164" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6177" } }, { @@ -6547,7 +6547,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6175" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6188" } }, { @@ -6601,7 +6601,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6186" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6199" } }, { @@ -6640,7 +6640,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6197" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6210" } }, { @@ -6725,7 +6725,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6208" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6221" } }, { @@ -6919,7 +6919,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6219" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6232" } }, { @@ -7017,7 +7017,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6230" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6243" } }, { @@ -7149,7 +7149,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6241" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6254" 
} }, { @@ -7203,7 +7203,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6252" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6265" } }, { @@ -7237,7 +7237,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6263" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6276" } }, { @@ -7324,7 +7324,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6274" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6287" } }, { @@ -7378,7 +7378,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6285" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6298" } }, { @@ -7478,7 +7478,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6296" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6309" } }, { @@ -7555,7 +7555,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6307" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6320" } }, { @@ -7646,7 +7646,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6318" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6331" } }, { @@ -7685,7 +7685,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6329" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6342" } }, { @@ -7801,7 +7801,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6340" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6353" } }, { @@ -9901,7 +9901,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6351" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6364" } } ] diff --git a/build/openrpc/worker.json b/build/openrpc/worker.json index 77c70b69cb0..3a06e9ba6ad 100644 --- a/build/openrpc/worker.json +++ b/build/openrpc/worker.json @@ -161,7 +161,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6439" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6452" } }, { @@ -252,7 +252,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6450" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6463" } }, { @@ -420,7 
+420,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6461" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6474" } }, { @@ -447,7 +447,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6472" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6485" } }, { @@ -597,7 +597,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6483" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6496" } }, { @@ -700,7 +700,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6494" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6507" } }, { @@ -803,7 +803,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6505" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6518" } }, { @@ -925,7 +925,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6516" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6529" } }, { @@ -1135,7 +1135,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6527" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6540" } }, { @@ -1306,7 +1306,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6538" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6551" } }, { @@ -3350,7 +3350,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6549" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6562" } }, { @@ -3470,7 +3470,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6560" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6573" } }, { @@ -3531,7 +3531,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6571" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6584" } }, { @@ -3569,7 +3569,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6582" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6595" } }, { @@ -3729,7 +3729,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6593" + "url": 
"https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6606" } }, { @@ -3913,7 +3913,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6604" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6617" } }, { @@ -4054,7 +4054,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6615" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6628" } }, { @@ -4107,7 +4107,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6626" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6639" } }, { @@ -4250,7 +4250,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6637" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6650" } }, { @@ -4474,7 +4474,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6648" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6661" } }, { @@ -4601,7 +4601,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6659" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6672" } }, { @@ -4768,7 +4768,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6670" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6683" } }, { @@ -4895,7 +4895,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6681" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6694" } }, { @@ -4933,7 +4933,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6692" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6705" } }, { @@ -4972,7 +4972,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6703" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6716" } }, { @@ -4995,7 +4995,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6714" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6727" } }, { @@ -5034,7 +5034,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6725" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6738" } }, { @@ -5057,7 +5057,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote 
link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6736" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6749" } }, { @@ -5096,7 +5096,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6747" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6760" } }, { @@ -5130,7 +5130,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6758" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6771" } }, { @@ -5184,7 +5184,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6769" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6782" } }, { @@ -5223,7 +5223,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6780" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6793" } }, { @@ -5262,7 +5262,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6791" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6804" } }, { @@ -5297,7 +5297,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6802" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6815" } }, { @@ -5477,7 +5477,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6813" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6826" } }, { @@ -5506,7 +5506,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6824" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6837" } }, { @@ -5529,7 +5529,7 @@ "deprecated": false, "externalDocs": { "description": "Github remote link", - "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6835" + "url": "https://github.com/filecoin-project/lotus/blob/master/api/proxy_gen.go#L6848" } } ] diff --git a/chain/ethhashlookup/eth_transaction_hash_lookup.go b/chain/ethhashlookup/eth_transaction_hash_lookup.go deleted file mode 100644 index 2a34e37aa03..00000000000 --- a/chain/ethhashlookup/eth_transaction_hash_lookup.go +++ /dev/null @@ -1,150 +0,0 @@ -package ethhashlookup - -import ( - "context" - "database/sql" - "errors" - "strconv" - - "github.com/ipfs/go-cid" - _ "github.com/mattn/go-sqlite3" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/types/ethtypes" - "github.com/filecoin-project/lotus/lib/sqlite" -) - -const DefaultDbFilename = "txhash.db" - -var ErrNotFound = errors.New("not found") - -var ddls = []string{ - `CREATE TABLE IF NOT EXISTS eth_tx_hashes ( - hash TEXT PRIMARY KEY NOT NULL, - cid TEXT NOT NULL UNIQUE, - insertion_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL - )`, 
- - `CREATE INDEX IF NOT EXISTS insertion_time_index ON eth_tx_hashes (insertion_time)`, -} - -const ( - insertTxHash = `INSERT INTO eth_tx_hashes (hash, cid) VALUES(?, ?) ON CONFLICT (hash) DO UPDATE SET insertion_time = CURRENT_TIMESTAMP` - getCidFromHash = `SELECT cid FROM eth_tx_hashes WHERE hash = ?` - getHashFromCid = `SELECT hash FROM eth_tx_hashes WHERE cid = ?` - deleteOlderThan = `DELETE FROM eth_tx_hashes WHERE insertion_time < datetime('now', ?);` -) - -type EthTxHashLookup struct { - db *sql.DB - - stmtInsertTxHash *sql.Stmt - stmtGetCidFromHash *sql.Stmt - stmtGetHashFromCid *sql.Stmt - stmtDeleteOlderThan *sql.Stmt -} - -func NewTransactionHashLookup(ctx context.Context, path string) (*EthTxHashLookup, error) { - db, _, err := sqlite.Open(path) - if err != nil { - return nil, xerrors.Errorf("failed to setup eth transaction hash lookup db: %w", err) - } - - if err := sqlite.InitDb(ctx, "eth transaction hash lookup", db, ddls, []sqlite.MigrationFunc{}); err != nil { - _ = db.Close() - return nil, xerrors.Errorf("failed to init eth transaction hash lookup db: %w", err) - } - - ei := &EthTxHashLookup{db: db} - - if err = ei.initStatements(); err != nil { - _ = ei.Close() - return nil, xerrors.Errorf("error preparing eth transaction hash lookup db statements: %w", err) - } - - return ei, nil -} - -func (ei *EthTxHashLookup) initStatements() (err error) { - ei.stmtInsertTxHash, err = ei.db.Prepare(insertTxHash) - if err != nil { - return xerrors.Errorf("prepare stmtInsertTxHash: %w", err) - } - ei.stmtGetCidFromHash, err = ei.db.Prepare(getCidFromHash) - if err != nil { - return xerrors.Errorf("prepare stmtGetCidFromHash: %w", err) - } - ei.stmtGetHashFromCid, err = ei.db.Prepare(getHashFromCid) - if err != nil { - return xerrors.Errorf("prepare stmtGetHashFromCid: %w", err) - } - ei.stmtDeleteOlderThan, err = ei.db.Prepare(deleteOlderThan) - if err != nil { - return xerrors.Errorf("prepare stmtDeleteOlderThan: %w", err) - } - return nil -} - -func (ei *EthTxHashLookup) UpsertHash(txHash ethtypes.EthHash, c cid.Cid) error { - if ei.db == nil { - return xerrors.New("db closed") - } - - _, err := ei.stmtInsertTxHash.Exec(txHash.String(), c.String()) - return err -} - -func (ei *EthTxHashLookup) GetCidFromHash(txHash ethtypes.EthHash) (cid.Cid, error) { - if ei.db == nil { - return cid.Undef, xerrors.New("db closed") - } - - row := ei.stmtGetCidFromHash.QueryRow(txHash.String()) - var c string - err := row.Scan(&c) - if err != nil { - if err == sql.ErrNoRows { - return cid.Undef, ErrNotFound - } - return cid.Undef, err - } - return cid.Decode(c) -} - -func (ei *EthTxHashLookup) GetHashFromCid(c cid.Cid) (ethtypes.EthHash, error) { - if ei.db == nil { - return ethtypes.EmptyEthHash, xerrors.New("db closed") - } - - row := ei.stmtGetHashFromCid.QueryRow(c.String()) - var hashString string - err := row.Scan(&c) - if err != nil { - if err == sql.ErrNoRows { - return ethtypes.EmptyEthHash, ErrNotFound - } - return ethtypes.EmptyEthHash, err - } - return ethtypes.ParseEthHash(hashString) -} - -func (ei *EthTxHashLookup) DeleteEntriesOlderThan(days int) (int64, error) { - if ei.db == nil { - return 0, xerrors.New("db closed") - } - - res, err := ei.stmtDeleteOlderThan.Exec("-" + strconv.Itoa(days) + " day") - if err != nil { - return 0, err - } - return res.RowsAffected() -} - -func (ei *EthTxHashLookup) Close() (err error) { - if ei.db == nil { - return nil - } - db := ei.db - ei.db = nil - return db.Close() -} diff --git a/chain/events/filter/event.go b/chain/events/filter/event.go index 
8592dfb423e..ccf3bd0c941 100644 --- a/chain/events/filter/event.go +++ b/chain/events/filter/event.go @@ -16,6 +16,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/lotus/chain/index" cstore "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" ) @@ -32,7 +33,7 @@ type AddressResolver func(context.Context, abi.ActorID, *types.TipSet) (address. type EventFilter interface { Filter - TakeCollectedEvents(context.Context) []*CollectedEvent + TakeCollectedEvents(context.Context) []*index.CollectedEvent CollectEvents(context.Context, *TipSetEvents, bool, AddressResolver) error } @@ -47,24 +48,13 @@ type eventFilter struct { maxResults int // maximum number of results to collect, 0 is unlimited mu sync.Mutex - collected []*CollectedEvent + collected []*index.CollectedEvent lastTaken time.Time ch chan<- interface{} } var _ Filter = (*eventFilter)(nil) -type CollectedEvent struct { - Entries []types.EventEntry - EmitterAddr address.Address // address of emitter - EventIdx int // index of the event within the list of emitted events in a given tipset - Reverted bool - Height abi.ChainEpoch - TipSetKey types.TipSetKey // tipset that contained the message - MsgIdx int // index of the message in the tipset - MsgCid cid.Cid // cid of message that produced event -} - func (f *eventFilter) ID() types.FilterID { return f.id } @@ -119,7 +109,7 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever } // event matches filter, so record it - cev := &CollectedEvent{ + cev := &index.CollectedEvent{ Entries: ev.Entries, EmitterAddr: addr, EventIdx: eventCount, @@ -151,13 +141,13 @@ func (f *eventFilter) CollectEvents(ctx context.Context, te *TipSetEvents, rever return nil } -func (f *eventFilter) setCollectedEvents(ces []*CollectedEvent) { +func (f *eventFilter) setCollectedEvents(ces []*index.CollectedEvent) { f.mu.Lock() f.collected = ces f.mu.Unlock() } -func (f *eventFilter) TakeCollectedEvents(ctx context.Context) []*CollectedEvent { +func (f *eventFilter) TakeCollectedEvents(ctx context.Context) []*index.CollectedEvent { f.mu.Lock() collected := f.collected f.collected = nil @@ -307,7 +297,7 @@ type EventFilterManager struct { ChainStore *cstore.ChainStore AddressResolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) MaxFilterResults int - EventIndex *EventIndex + ChainIndexer index.Indexer mu sync.Mutex // guards mutations to filters filters map[types.FilterID]EventFilter @@ -319,7 +309,7 @@ func (m *EventFilterManager) Apply(ctx context.Context, from, to *types.TipSet) defer m.mu.Unlock() m.currentHeight = to.Height() - if len(m.filters) == 0 && m.EventIndex == nil { + if len(m.filters) == 0 { return nil } @@ -329,12 +319,6 @@ func (m *EventFilterManager) Apply(ctx context.Context, from, to *types.TipSet) load: m.loadExecutedMessages, } - if m.EventIndex != nil { - if err := m.EventIndex.CollectEvents(ctx, tse, false, m.AddressResolver); err != nil { - return err - } - } - // TODO: could run this loop in parallel with errgroup if there are many filters for _, f := range m.filters { if err := f.CollectEvents(ctx, tse, false, m.AddressResolver); err != nil { @@ -350,7 +334,7 @@ func (m *EventFilterManager) Revert(ctx context.Context, from, to *types.TipSet) defer m.mu.Unlock() m.currentHeight = to.Height() - if len(m.filters) == 0 && m.EventIndex == nil { + if len(m.filters) == 0 { 
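// event persistence now lives in the ChainIndexer, so Apply and Revert only
// fan tipset events out to the in-memory filters; with none installed there
// is nothing left to do here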
return nil } @@ -360,12 +344,6 @@ func (m *EventFilterManager) Revert(ctx context.Context, from, to *types.TipSet) load: m.loadExecutedMessages, } - if m.EventIndex != nil { - if err := m.EventIndex.CollectEvents(ctx, tse, true, m.AddressResolver); err != nil { - return err - } - } - // TODO: could run this loop in parallel with errgroup if there are many filters for _, f := range m.filters { if err := f.CollectEvents(ctx, tse, true, m.AddressResolver); err != nil { @@ -392,7 +370,7 @@ func (m *EventFilterManager) Fill( currentHeight := m.currentHeight m.mu.Unlock() - if m.EventIndex == nil && minHeight != -1 && minHeight < currentHeight { + if m.ChainIndexer == nil && minHeight != -1 && minHeight < currentHeight { return nil, xerrors.Errorf("historic event index disabled") } @@ -411,12 +389,22 @@ func (m *EventFilterManager) Fill( maxResults: m.MaxFilterResults, } - if m.EventIndex != nil && minHeight != -1 && minHeight < currentHeight { - // Filter needs historic events - excludeReverted := tipsetCid == cid.Undef - if err := m.EventIndex.prefillFilter(ctx, f, excludeReverted); err != nil { - return nil, err + if m.ChainIndexer != nil && minHeight != -1 && minHeight < currentHeight { + ef := &index.EventFilter{ + MinHeight: minHeight, + MaxHeight: maxHeight, + TipsetCid: tipsetCid, + Addresses: addresses, + KeysWithCodec: keysWithCodec, + MaxResults: m.MaxFilterResults, } + + ces, err := m.ChainIndexer.GetEventsForFilter(ctx, ef) + if err != nil { + return nil, xerrors.Errorf("get events for filter: %w", err) + } + + f.setCollectedEvents(ces) } return f, nil diff --git a/chain/events/filter/event_test.go b/chain/events/filter/event_test.go index c650b71eb6f..5ffb678c65e 100644 --- a/chain/events/filter/event_test.go +++ b/chain/events/filter/event_test.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/types" ) @@ -70,8 +71,8 @@ func TestEventFilterCollectEvents(t *testing.T) { cid14000, err := events14000.msgTs.Key().Cid() require.NoError(t, err, "tipset cid") - noCollectedEvents := []*CollectedEvent{} - oneCollectedEvent := []*CollectedEvent{ + noCollectedEvents := []*index.CollectedEvent{} + oneCollectedEvent := []*index.CollectedEvent{ { Entries: ev1.Entries, EmitterAddr: a1, @@ -88,7 +89,7 @@ func TestEventFilterCollectEvents(t *testing.T) { name string filter *eventFilter te *TipSetEvents - want []*CollectedEvent + want []*index.CollectedEvent }{ { name: "nomatch tipset min height", diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go deleted file mode 100644 index ff7f1aeaa7e..00000000000 --- a/chain/events/filter/index.go +++ /dev/null @@ -1,672 +0,0 @@ -package filter - -import ( - "context" - "database/sql" - "errors" - "fmt" - "sort" - "strings" - "sync" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - _ "github.com/mattn/go-sqlite3" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sqlite" -) - -const DefaultDbFilename = "events.db" - -// Any changes to this schema should be matched for the `lotus-shed indexes backfill-events` command - -var ddls = []string{ - `CREATE TABLE IF NOT EXISTS event ( - id INTEGER PRIMARY KEY, - height INTEGER NOT NULL, - tipset_key 
BLOB NOT NULL, - tipset_key_cid BLOB NOT NULL, - emitter_addr BLOB NOT NULL, - event_index INTEGER NOT NULL, - message_cid BLOB NOT NULL, - message_index INTEGER NOT NULL, - reverted INTEGER NOT NULL - )`, - - createIndexEventTipsetKeyCid, - createIndexEventHeight, - - `CREATE TABLE IF NOT EXISTS event_entry ( - event_id INTEGER, - indexed INTEGER NOT NULL, - flags BLOB NOT NULL, - key TEXT NOT NULL, - codec INTEGER, - value BLOB NOT NULL - )`, - - createTableEventsSeen, - - createIndexEventEntryEventId, - createIndexEventsSeenHeight, - createIndexEventsSeenTipsetKeyCid, -} - -var ( - log = logging.Logger("filter") -) - -const ( - createTableEventsSeen = `CREATE TABLE IF NOT EXISTS events_seen ( - id INTEGER PRIMARY KEY, - height INTEGER NOT NULL, - tipset_key_cid BLOB NOT NULL, - reverted INTEGER NOT NULL, - UNIQUE(height, tipset_key_cid) - )` - - // When modifying indexes in this file, it is critical to test the query plan (EXPLAIN QUERY PLAN) - // of all the variations of queries built by prefillFilter to ensure that the query first hits - // an index that narrows down results to an epoch or a reasonable range of epochs. Specifically, - // event_tipset_key_cid or event_height should be the first index. Then further narrowing can take - // place within the small subset of results. - // Unfortunately SQLite has some quirks in index selection that mean that certain query types will - // bypass these indexes if alternatives are available. This has been observed specifically on - // queries with height ranges: `height>=X AND height<=Y`. - // - // e.g. we want to see that `event_height` is the first index used in this query: - // - // EXPLAIN QUERY PLAN - // SELECT - // event.height, event.tipset_key_cid, event_entry.indexed, event_entry.codec, event_entry.key, event_entry.value - // FROM event - // JOIN - // event_entry ON event.id=event_entry.event_id, - // event_entry ee2 ON event.id=ee2.event_id - // WHERE event.height>=? AND event.height<=? AND event.reverted=? AND event.emitter_addr=? AND ee2.indexed=1 AND ee2.key=? - // ORDER BY event.height DESC, event_entry._rowid_ ASC - // - // -> - // - // QUERY PLAN - // |--SEARCH event USING INDEX event_height (height>? AND height 0 FROM events_seen WHERE tipset_key_cid=?`, // QUERY PLAN: SEARCH events_seen USING COVERING INDEX events_seen_tipset_key_cid (tipset_key_cid=?) - &ps.getMaxHeightInIndex: `SELECT MAX(height) FROM events_seen`, // QUERY PLAN: SEARCH events_seen USING COVERING INDEX events_seen_height - &ps.isHeightProcessed: `SELECT COUNT(*) > 0 FROM events_seen WHERE height=?`, // QUERY PLAN: SEARCH events_seen USING COVERING INDEX events_seen_height (height=?) 
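Aside on the warning above: the index discipline it describes can be checked mechanically rather than by eyeballing plans. A minimal, self-contained sketch of such a check, assuming a hypothetical trimmed-down stand-in for the `event` table and the `mattn/go-sqlite3` driver used throughout this file:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"strings"

	_ "github.com/mattn/go-sqlite3"
)

func main() {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Trimmed-down stand-in for the event table and its height index.
	if _, err := db.Exec(`CREATE TABLE event (id INTEGER PRIMARY KEY, height INTEGER NOT NULL, reverted INTEGER NOT NULL);
		CREATE INDEX event_height ON event (height)`); err != nil {
		log.Fatal(err)
	}

	// EXPLAIN QUERY PLAN yields one row per plan node: (id, parent, notused, detail).
	rows, err := db.Query(`EXPLAIN QUERY PLAN
		SELECT * FROM event WHERE event.height>=? AND event.height<=? AND event.reverted=?`,
		100, 200, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var id, parent, notused int
		var detail string
		if err := rows.Scan(&id, &parent, &notused, &detail); err != nil {
			log.Fatal(err)
		}
		// The node should SEARCH via event_height, not SCAN the whole table.
		if !strings.Contains(detail, "USING INDEX event_height") {
			log.Fatalf("query bypassed the height index: %s", detail)
		}
		fmt.Println("plan ok:", detail)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```

A test along these lines, run against every query variation a filter builder can produce, catches the silent index-bypass regressions the comment above warns about.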
- - } -} - -type preparedStatements struct { - insertEvent *sql.Stmt - insertEntry *sql.Stmt - revertEventsInTipset *sql.Stmt - restoreEvent *sql.Stmt - upsertEventsSeen *sql.Stmt - revertEventSeen *sql.Stmt - restoreEventSeen *sql.Stmt - eventExists *sql.Stmt - isTipsetProcessed *sql.Stmt - getMaxHeightInIndex *sql.Stmt - isHeightProcessed *sql.Stmt -} - -type EventIndex struct { - db *sql.DB - - stmt *preparedStatements - - mu sync.Mutex - subIdCounter uint64 - updateSubs map[uint64]*updateSub -} - -type updateSub struct { - ctx context.Context - ch chan EventIndexUpdated - cancel context.CancelFunc -} - -type EventIndexUpdated struct{} - -func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStore) (*EventIndex, error) { - db, _, err := sqlite.Open(path) - if err != nil { - return nil, xerrors.Errorf("failed to setup event index db: %w", err) - } - - err = sqlite.InitDb(ctx, "event index", db, ddls, []sqlite.MigrationFunc{ - migrationVersion2(db, chainStore), - migrationVersion3, - migrationVersion4, - migrationVersion5, - migrationVersion6, - migrationVersion7, - }) - if err != nil { - _ = db.Close() - return nil, xerrors.Errorf("failed to setup event index db: %w", err) - } - - eventIndex := EventIndex{ - db: db, - stmt: &preparedStatements{}, - } - - if err = eventIndex.initStatements(); err != nil { - _ = db.Close() - return nil, xerrors.Errorf("error preparing eventIndex database statements: %w", err) - } - - eventIndex.updateSubs = make(map[uint64]*updateSub) - - return &eventIndex, nil -} - -func (ei *EventIndex) initStatements() error { - stmtMapping := preparedStatementMapping(ei.stmt) - for stmtPointer, query := range stmtMapping { - var err error - *stmtPointer, err = ei.db.Prepare(query) - if err != nil { - return xerrors.Errorf("prepare statement [%s]: %w", query, err) - } - } - - return nil -} - -func (ei *EventIndex) Close() error { - if ei.db == nil { - return nil - } - return ei.db.Close() -} - -func (ei *EventIndex) SubscribeUpdates() (chan EventIndexUpdated, func()) { - subCtx, subCancel := context.WithCancel(context.Background()) - ch := make(chan EventIndexUpdated) - - tSub := &updateSub{ - ctx: subCtx, - cancel: subCancel, - ch: ch, - } - - ei.mu.Lock() - subId := ei.subIdCounter - ei.subIdCounter++ - ei.updateSubs[subId] = tSub - ei.mu.Unlock() - - unSubscribeF := func() { - ei.mu.Lock() - tSub, ok := ei.updateSubs[subId] - if !ok { - ei.mu.Unlock() - return - } - delete(ei.updateSubs, subId) - ei.mu.Unlock() - - // cancel the subscription - tSub.cancel() - } - - return tSub.ch, unSubscribeF -} - -func (ei *EventIndex) GetMaxHeightInIndex(ctx context.Context) (uint64, error) { - row := ei.stmt.getMaxHeightInIndex.QueryRowContext(ctx) - var maxHeight uint64 - err := row.Scan(&maxHeight) - return maxHeight, err -} - -func (ei *EventIndex) IsHeightPast(ctx context.Context, height uint64) (bool, error) { - maxHeight, err := ei.GetMaxHeightInIndex(ctx) - if err != nil { - return false, err - } - return height <= maxHeight, nil -} - -func (ei *EventIndex) IsTipsetProcessed(ctx context.Context, tipsetKeyCid []byte) (bool, error) { - row := ei.stmt.isTipsetProcessed.QueryRowContext(ctx, tipsetKeyCid) - var exists bool - err := row.Scan(&exists) - return exists, err -} - -func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, revert bool, resolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool)) error { - tx, err := ei.db.BeginTx(ctx, nil) - if err != nil { - return xerrors.Errorf("begin 
transaction: %w", err) - } - // rollback the transaction (a no-op if the transaction was already committed) - defer func() { _ = tx.Rollback() }() - - tsKeyCid, err := te.msgTs.Key().Cid() - if err != nil { - return xerrors.Errorf("tipset key cid: %w", err) - } - - // lets handle the revert case first, since its simpler and we can simply mark all events in this tipset as reverted and return - if revert { - _, err = tx.Stmt(ei.stmt.revertEventsInTipset).Exec(te.msgTs.Height(), te.msgTs.Key().Bytes()) - if err != nil { - return xerrors.Errorf("revert event: %w", err) - } - - _, err = tx.Stmt(ei.stmt.revertEventSeen).Exec(te.msgTs.Height(), tsKeyCid.Bytes()) - if err != nil { - return xerrors.Errorf("revert event seen: %w", err) - } - - err = tx.Commit() - if err != nil { - return xerrors.Errorf("commit transaction: %w", err) - } - - ei.mu.Lock() - tSubs := make([]*updateSub, 0, len(ei.updateSubs)) - for _, tSub := range ei.updateSubs { - tSubs = append(tSubs, tSub) - } - ei.mu.Unlock() - - for _, tSub := range tSubs { - tSub := tSub - select { - case tSub.ch <- EventIndexUpdated{}: - case <-tSub.ctx.Done(): - // subscription was cancelled, ignore - case <-ctx.Done(): - return ctx.Err() - } - } - - return nil - } - - // cache of lookups between actor id and f4 address - addressLookups := make(map[abi.ActorID]address.Address) - - ems, err := te.messages(ctx) - if err != nil { - return xerrors.Errorf("load executed messages: %w", err) - } - - eventCount := 0 - // iterate over all executed messages in this tipset and insert them into the database if they - // don't exist, otherwise mark them as not reverted - for msgIdx, em := range ems { - for _, ev := range em.Events() { - addr, found := addressLookups[ev.Emitter] - if !found { - var ok bool - addr, ok = resolver(ctx, ev.Emitter, te.rctTs) - if !ok { - // not an address we will be able to match against - continue - } - addressLookups[ev.Emitter] = addr - } - - // check if this event already exists in the database - var entryID sql.NullInt64 - err = tx.Stmt(ei.stmt.eventExists).QueryRow( - te.msgTs.Height(), // height - te.msgTs.Key().Bytes(), // tipset_key - tsKeyCid.Bytes(), // tipset_key_cid - addr.Bytes(), // emitter_addr - eventCount, // event_index - em.Message().Cid().Bytes(), // message_cid - msgIdx, // message_index - ).Scan(&entryID) - if err != nil { - return xerrors.Errorf("error checking if event exists: %w", err) - } - - if !entryID.Valid { - // event does not exist, lets insert it - res, err := tx.Stmt(ei.stmt.insertEvent).Exec( - te.msgTs.Height(), // height - te.msgTs.Key().Bytes(), // tipset_key - tsKeyCid.Bytes(), // tipset_key_cid - addr.Bytes(), // emitter_addr - eventCount, // event_index - em.Message().Cid().Bytes(), // message_cid - msgIdx, // message_index - false, // reverted - ) - if err != nil { - return xerrors.Errorf("exec insert event: %w", err) - } - - entryID.Int64, err = res.LastInsertId() - if err != nil { - return xerrors.Errorf("get last row id: %w", err) - } - - // insert all the entries for this event - for _, entry := range ev.Entries { - _, err = tx.Stmt(ei.stmt.insertEntry).Exec( - entryID.Int64, // event_id - isIndexedValue(entry.Flags), // indexed - []byte{entry.Flags}, // flags - entry.Key, // key - entry.Codec, // codec - entry.Value, // value - ) - if err != nil { - return xerrors.Errorf("exec insert entry: %w", err) - } - } - } else { - // event already exists, lets mark it as not reverted - res, err := tx.Stmt(ei.stmt.restoreEvent).Exec( - te.msgTs.Height(), // height - te.msgTs.Key().Bytes(), // 
tipset_key - tsKeyCid.Bytes(), // tipset_key_cid - addr.Bytes(), // emitter_addr - eventCount, // event_index - em.Message().Cid().Bytes(), // message_cid - msgIdx, // message_index - ) - if err != nil { - return xerrors.Errorf("exec restore event: %w", err) - } - - rowsAffected, err := res.RowsAffected() - if err != nil { - return xerrors.Errorf("error getting rows affected: %s", err) - } - - // this is a sanity check as we should only ever be updating one event - if rowsAffected != 1 { - log.Warnf("restored %d events but expected only one to exist", rowsAffected) - } - } - eventCount++ - } - } - - // this statement will mark the tipset as processed and will insert a new row if it doesn't exist - // or update the reverted field to false if it does - _, err = tx.Stmt(ei.stmt.upsertEventsSeen).Exec( - te.msgTs.Height(), - tsKeyCid.Bytes(), - ) - if err != nil { - return xerrors.Errorf("exec upsert events seen: %w", err) - } - - err = tx.Commit() - if err != nil { - return xerrors.Errorf("commit transaction: %w", err) - } - - ei.mu.Lock() - tSubs := make([]*updateSub, 0, len(ei.updateSubs)) - for _, tSub := range ei.updateSubs { - tSubs = append(tSubs, tSub) - } - ei.mu.Unlock() - - for _, tSub := range tSubs { - tSub := tSub - select { - case tSub.ch <- EventIndexUpdated{}: - case <-tSub.ctx.Done(): - // subscription was cancelled, ignore - case <-ctx.Done(): - return ctx.Err() - } - } - - return nil -} - -// prefillFilter fills a filter's collection of events from the historic index -func (ei *EventIndex) prefillFilter(ctx context.Context, f *eventFilter, excludeReverted bool) error { - values, query := makePrefillFilterQuery(f, excludeReverted) - - stmt, err := ei.db.Prepare(query) - if err != nil { - return xerrors.Errorf("prepare prefill query: %w", err) - } - defer func() { _ = stmt.Close() }() - - q, err := stmt.QueryContext(ctx, values...) 
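// each row returned by the JOIN is one (event, event_entry) pair, so a single
// event arrives as several consecutive rows; the scan loop below re-groups
// them by event id, appending entries until the id changes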
- if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil - } - return xerrors.Errorf("exec prefill query: %w", err) - } - defer func() { _ = q.Close() }() - - var ces []*CollectedEvent - var currentID int64 = -1 - var ce *CollectedEvent - - for q.Next() { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - var row struct { - id int64 - height uint64 - tipsetKey []byte - tipsetKeyCid []byte - emitterAddr []byte - eventIndex int - messageCid []byte - messageIndex int - reverted bool - flags []byte - key string - codec uint64 - value []byte - } - - if err := q.Scan( - &row.id, - &row.height, - &row.tipsetKey, - &row.tipsetKeyCid, - &row.emitterAddr, - &row.eventIndex, - &row.messageCid, - &row.messageIndex, - &row.reverted, - &row.flags, - &row.key, - &row.codec, - &row.value, - ); err != nil { - return xerrors.Errorf("read prefill row: %w", err) - } - - if row.id != currentID { - if ce != nil { - ces = append(ces, ce) - ce = nil - // Unfortunately we can't easily incorporate the max results limit into the query due to the - // unpredictable number of rows caused by joins - // Break here to stop collecting rows - if f.maxResults > 0 && len(ces) >= f.maxResults { - break - } - } - - currentID = row.id - ce = &CollectedEvent{ - EventIdx: row.eventIndex, - Reverted: row.reverted, - Height: abi.ChainEpoch(row.height), - MsgIdx: row.messageIndex, - } - - ce.EmitterAddr, err = address.NewFromBytes(row.emitterAddr) - if err != nil { - return xerrors.Errorf("parse emitter addr: %w", err) - } - - ce.TipSetKey, err = types.TipSetKeyFromBytes(row.tipsetKey) - if err != nil { - return xerrors.Errorf("parse tipsetkey: %w", err) - } - - ce.MsgCid, err = cid.Cast(row.messageCid) - if err != nil { - return xerrors.Errorf("parse message cid: %w", err) - } - } - - ce.Entries = append(ce.Entries, types.EventEntry{ - Flags: row.flags[0], - Key: row.key, - Codec: row.codec, - Value: row.value, - }) - } - - if ce != nil { - ces = append(ces, ce) - } - - if len(ces) == 0 { - return nil - } - - // collected event list is in inverted order since we selected only the most recent events - // sort it into height order - sort.Slice(ces, func(i, j int) bool { return ces[i].Height < ces[j].Height }) - f.setCollectedEvents(ces) - - return nil -} - -func makePrefillFilterQuery(f *eventFilter, excludeReverted bool) ([]any, string) { - clauses := []string{} - values := []any{} - joins := []string{} - - if f.tipsetCid != cid.Undef { - clauses = append(clauses, "event.tipset_key_cid=?") - values = append(values, f.tipsetCid.Bytes()) - } else { - if f.minHeight >= 0 && f.minHeight == f.maxHeight { - clauses = append(clauses, "event.height=?") - values = append(values, f.minHeight) - } else { - if f.maxHeight >= 0 && f.minHeight >= 0 { - clauses = append(clauses, "event.height BETWEEN ? 
AND ?") - values = append(values, f.minHeight, f.maxHeight) - } else if f.minHeight >= 0 { - clauses = append(clauses, "event.height >= ?") - values = append(values, f.minHeight) - } else if f.maxHeight >= 0 { - clauses = append(clauses, "event.height <= ?") - values = append(values, f.maxHeight) - } - } - } - - if excludeReverted { - clauses = append(clauses, "event.reverted=?") - values = append(values, false) - } - - if len(f.addresses) > 0 { - for _, addr := range f.addresses { - values = append(values, addr.Bytes()) - } - clauses = append(clauses, "event.emitter_addr IN ("+strings.Repeat("?,", len(f.addresses)-1)+"?)") - } - - if len(f.keysWithCodec) > 0 { - join := 0 - for key, vals := range f.keysWithCodec { - if len(vals) > 0 { - join++ - joinAlias := fmt.Sprintf("ee%d", join) - joins = append(joins, fmt.Sprintf("event_entry %s ON event.id=%[1]s.event_id", joinAlias)) - clauses = append(clauses, fmt.Sprintf("%s.indexed=1 AND %[1]s.key=?", joinAlias)) - values = append(values, key) - subclauses := make([]string, 0, len(vals)) - for _, val := range vals { - subclauses = append(subclauses, fmt.Sprintf("(%s.value=? AND %[1]s.codec=?)", joinAlias)) - values = append(values, val.Value, val.Codec) - } - clauses = append(clauses, "("+strings.Join(subclauses, " OR ")+")") - } - } - } - - s := `SELECT - event.id, - event.height, - event.tipset_key, - event.tipset_key_cid, - event.emitter_addr, - event.event_index, - event.message_cid, - event.message_index, - event.reverted, - event_entry.flags, - event_entry.key, - event_entry.codec, - event_entry.value - FROM event JOIN event_entry ON event.id=event_entry.event_id` - - if len(joins) > 0 { - s = s + ", " + strings.Join(joins, ", ") - } - - if len(clauses) > 0 { - s = s + " WHERE " + strings.Join(clauses, " AND ") - } - - // retain insertion order of event_entry rows with the implicit _rowid_ column - s += " ORDER BY event.height DESC, event_entry._rowid_ ASC" - return values, s -} diff --git a/chain/events/filter/index_migrations.go b/chain/events/filter/index_migrations.go deleted file mode 100644 index bf8fd2f943c..00000000000 --- a/chain/events/filter/index_migrations.go +++ /dev/null @@ -1,260 +0,0 @@ -package filter - -import ( - "context" - "database/sql" - "errors" - "fmt" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/lib/sqlite" -) - -func migrationVersion2(db *sql.DB, chainStore *store.ChainStore) sqlite.MigrationFunc { - return func(ctx context.Context, tx *sql.Tx) error { - // create some temporary indices to help speed up the migration - _, err := tx.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS tmp_height_tipset_key_cid ON event (height,tipset_key_cid)") - if err != nil { - return xerrors.Errorf("create index tmp_height_tipset_key_cid: %w", err) - } - _, err = tx.ExecContext(ctx, "CREATE INDEX IF NOT EXISTS tmp_tipset_key_cid ON event (tipset_key_cid)") - if err != nil { - return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err) - } - - stmtDeleteOffChainEvent, err := tx.PrepareContext(ctx, "DELETE FROM event WHERE tipset_key_cid!=? and height=?") - if err != nil { - return xerrors.Errorf("prepare stmtDeleteOffChainEvent: %w", err) - } - - stmtSelectEvent, err := tx.PrepareContext(ctx, "SELECT id FROM event WHERE tipset_key_cid=? 
ORDER BY message_index ASC, event_index ASC, id DESC LIMIT 1") - if err != nil { - return xerrors.Errorf("prepare stmtSelectEvent: %w", err) - } - - stmtDeleteEvent, err := tx.PrepareContext(ctx, "DELETE FROM event WHERE tipset_key_cid=? AND id= minHeight.Int64 { - if currTs.Height()%1000 == 0 { - log.Infof("Migrating height %d (remaining %d)", currTs.Height(), int64(currTs.Height())-minHeight.Int64) - } - - tsKey := currTs.Parents() - currTs, err = chainStore.GetTipSetFromKey(ctx, tsKey) - if err != nil { - return xerrors.Errorf("get tipset from key: %w", err) - } - log.Debugf("Migrating height %d", currTs.Height()) - - tsKeyCid, err := currTs.Key().Cid() - if err != nil { - return fmt.Errorf("tipset key cid: %w", err) - } - - // delete all events that are not in the canonical chain - _, err = stmtDeleteOffChainEvent.Exec(tsKeyCid.Bytes(), currTs.Height()) - if err != nil { - return xerrors.Errorf("delete off chain event: %w", err) - } - - // find the first eventId from the last time the tipset was applied - var eventId sql.NullInt64 - err = stmtSelectEvent.QueryRow(tsKeyCid.Bytes()).Scan(&eventId) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - continue - } - return xerrors.Errorf("select event: %w", err) - } - - // this tipset might not have any events which is ok - if !eventId.Valid { - continue - } - log.Debugf("Deleting all events with id < %d at height %d", eventId.Int64, currTs.Height()) - - res, err := stmtDeleteEvent.Exec(tsKeyCid.Bytes(), eventId.Int64) - if err != nil { - return xerrors.Errorf("delete event: %w", err) - } - - nrRowsAffected, err := res.RowsAffected() - if err != nil { - return xerrors.Errorf("rows affected: %w", err) - } - log.Debugf("deleted %d events from tipset %s", nrRowsAffected, tsKeyCid.String()) - } - - // delete all entries that have an event_id that doesn't exist (since we don't have a foreign - // key constraint that gives us cascading deletes) - res, err := tx.ExecContext(ctx, "DELETE FROM event_entry WHERE event_id NOT IN (SELECT id FROM event)") - if err != nil { - return xerrors.Errorf("delete event_entry: %w", err) - } - - nrRowsAffected, err := res.RowsAffected() - if err != nil { - return xerrors.Errorf("rows affected: %w", err) - } - log.Infof("Cleaned up %d entries that had deleted events\n", nrRowsAffected) - - // drop the temporary indices after the migration - _, err = tx.ExecContext(ctx, "DROP INDEX IF EXISTS tmp_tipset_key_cid") - if err != nil { - return xerrors.Errorf("drop index tmp_tipset_key_cid: %w", err) - } - _, err = tx.ExecContext(ctx, "DROP INDEX IF EXISTS tmp_height_tipset_key_cid") - if err != nil { - return xerrors.Errorf("drop index tmp_height_tipset_key_cid: %w", err) - } - - // original v2 migration introduced an index: - // CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key) - // which has subsequently been removed in v4, so it's omitted here - - return nil - } -} - -// migrationVersion3 migrates the schema from version 2 to version 3 by creating two indices: -// 1) an index on the event.emitter_addr column, and 2) an index on the event_entry.key column. -// -// As of version 7, these indices have been removed as they were found to be a performance -// hindrance. This migration is now a no-op. -func migrationVersion3(ctx context.Context, tx *sql.Tx) error { - return nil -} - -// migrationVersion4 migrates the schema from version 3 to version 4 by adjusting indexes to match -// the query patterns of the event filter. -// -// First it drops indexes introduced in previous migrations: -// 1. 
the index on the event.height and event.tipset_key columns -// 2. the index on the event_entry.key column -// -// And then creating the following indices: -// 1. an index on the event.tipset_key_cid column -// 2. an index on the event.height column -// 3. an index on the event.reverted column (removed in version 7) -// 4. a composite index on the event_entry.indexed and event_entry.key columns (removed in version 7) -// 5. a composite index on the event_entry.codec and event_entry.value columns (removed in version 7) -// 6. an index on the event_entry.event_id column -// -// Indexes 3, 4, and 5 were removed in version 7 as they were found to be a performance hindrance so -// are omitted here. -func migrationVersion4(ctx context.Context, tx *sql.Tx) error { - for _, create := range []struct { - desc string - query string - }{ - {"drop index height_tipset_key", "DROP INDEX IF EXISTS height_tipset_key;"}, - {"drop index event_entry_key_index", "DROP INDEX IF EXISTS event_entry_key_index;"}, - {"create index event_tipset_key_cid", createIndexEventTipsetKeyCid}, - {"create index event_height", createIndexEventHeight}, - {"create index event_entry_event_id", createIndexEventEntryEventId}, - } { - if _, err := tx.ExecContext(ctx, create.query); err != nil { - return xerrors.Errorf("%s: %w", create.desc, err) - } - } - - return nil -} - -// migrationVersion5 migrates the schema from version 4 to version 5 by updating the event_index -// to be 0-indexed within a tipset. -func migrationVersion5(ctx context.Context, tx *sql.Tx) error { - stmtEventIndexUpdate, err := tx.PrepareContext(ctx, "UPDATE event SET event_index = (SELECT COUNT(*) FROM event e2 WHERE e2.tipset_key_cid = event.tipset_key_cid AND e2.id <= event.id) - 1") - if err != nil { - return xerrors.Errorf("prepare stmtEventIndexUpdate: %w", err) - } - - _, err = stmtEventIndexUpdate.ExecContext(ctx) - if err != nil { - return xerrors.Errorf("update event index: %w", err) - } - - return nil -} - -// migrationVersion6 migrates the schema from version 5 to version 6 by creating a new table -// events_seen that tracks the tipsets that have been seen by the event filter and populating it -// with the tipsets that have events in the event table. -func migrationVersion6(ctx context.Context, tx *sql.Tx) error { - stmtCreateTableEventsSeen, err := tx.PrepareContext(ctx, createTableEventsSeen) - if err != nil { - return xerrors.Errorf("prepare stmtCreateTableEventsSeen: %w", err) - } - _, err = stmtCreateTableEventsSeen.ExecContext(ctx) - if err != nil { - return xerrors.Errorf("create table events_seen: %w", err) - } - - _, err = tx.ExecContext(ctx, createIndexEventsSeenHeight) - if err != nil { - return xerrors.Errorf("create index events_seen_height: %w", err) - } - _, err = tx.ExecContext(ctx, createIndexEventsSeenTipsetKeyCid) - if err != nil { - return xerrors.Errorf("create index events_seen_tipset_key_cid: %w", err) - } - - // INSERT an entry in the events_seen table for all epochs we do have events for in our DB - _, err = tx.ExecContext(ctx, ` - INSERT OR IGNORE INTO events_seen (height, tipset_key_cid, reverted) - SELECT DISTINCT height, tipset_key_cid, reverted FROM event -`) - if err != nil { - return xerrors.Errorf("insert events into events_seen: %w", err) - } - - return nil -} - -// migrationVersion7 migrates the schema from version 6 to version 7 by dropping the following -// indices: -// 1. the index on the event.emitter_addr column -// 2. the index on the event.reverted column -// 3. 
the composite index on the event_entry.indexed and event_entry.key columns -// 4. the composite index on the event_entry.codec and event_entry.value columns -// -// These indices were found to be a performance hindrance as they prevent SQLite from using the -// intended initial indexes on height or tipset_key_cid in many query variations. Without additional -// indices to fall-back on, SQLite is forced to narrow down each query via height or tipset_key_cid -// which is the desired behavior. -func migrationVersion7(ctx context.Context, tx *sql.Tx) error { - for _, drop := range []struct { - desc string - query string - }{ - {"drop index event_emitter_addr", "DROP INDEX IF EXISTS event_emitter_addr;"}, - {"drop index event_reverted", "DROP INDEX IF EXISTS event_reverted;"}, - {"drop index event_entry_indexed_key", "DROP INDEX IF EXISTS event_entry_indexed_key;"}, - {"drop index event_entry_codec_value", "DROP INDEX IF EXISTS event_entry_codec_value;"}, - } { - if _, err := tx.ExecContext(ctx, drop.query); err != nil { - return xerrors.Errorf("%s: %w", drop.desc, err) - } - } - - return nil -} diff --git a/chain/events/filter/index_test.go b/chain/events/filter/index_test.go deleted file mode 100644 index 008b5697130..00000000000 --- a/chain/events/filter/index_test.go +++ /dev/null @@ -1,1046 +0,0 @@ -package filter - -import ( - "context" - pseudo "math/rand" - "os" - "path/filepath" - "regexp" - "strings" - "testing" - - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/types" -) - -func TestEventIndexPrefillFilter(t *testing.T) { - rng := pseudo.New(pseudo.NewSource(299792458)) - a1 := randomF4Addr(t, rng) - a2 := randomF4Addr(t, rng) - - a1ID := abi.ActorID(1) - a2ID := abi.ActorID(2) - - addrMap := addressMap{} - addrMap.add(a1ID, a1) - addrMap.add(a2ID, a2) - - ev1 := fakeEvent( - a1ID, - []kv{ - {k: "type", v: []byte("approval")}, - {k: "signer", v: []byte("addr1")}, - }, - []kv{ - {k: "amount", v: []byte("2988181")}, - }, - ) - - st := newStore() - events := []*types.Event{ev1} - em := executedMessage{ - msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), - rct: fakeReceipt(t, rng, st, events), - evs: events, - } - - events14000 := buildTipSetEvents(t, rng, 14000, em) - cid14000, err := events14000.msgTs.Key().Cid() - require.NoError(t, err, "tipset cid") - - noCollectedEvents := []*CollectedEvent{} - oneCollectedEvent := []*CollectedEvent{ - { - Entries: ev1.Entries, - EmitterAddr: a1, - EventIdx: 0, - Reverted: false, - Height: 14000, - TipSetKey: events14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: em.msg.Cid(), - }, - } - - workDir, err := os.MkdirTemp("", "lotusevents") - require.NoError(t, err, "create temporary work directory") - - defer func() { - _ = os.RemoveAll(workDir) - }() - t.Logf("using work dir %q", workDir) - - dbPath := filepath.Join(workDir, "actorevents.db") - - ei, err := NewEventIndex(context.Background(), dbPath, nil) - require.NoError(t, err, "create event index") - - subCh, unSubscribe := ei.SubscribeUpdates() - defer unSubscribe() - - out := make(chan EventIndexUpdated, 1) - go func() { - tu := <-subCh - out <- tu - }() - - if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "collect events") - } - - mh, err := ei.GetMaxHeightInIndex(context.Background()) - require.NoError(t, err) - require.Equal(t, uint64(14000), mh) - 
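// IsHeightPast is inclusive: a height counts as past once it is <= the max
// height recorded in the index, so 14000 itself qualifies while 14001 does not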
- b, err := ei.IsHeightPast(context.Background(), 14000) - require.NoError(t, err) - require.True(t, b) - - b, err = ei.IsHeightPast(context.Background(), 14001) - require.NoError(t, err) - require.False(t, b) - - b, err = ei.IsHeightPast(context.Background(), 13000) - require.NoError(t, err) - require.True(t, b) - - tsKey := events14000.msgTs.Key() - tsKeyCid, err := tsKey.Cid() - require.NoError(t, err, "tipset key cid") - - seen, err := ei.IsTipsetProcessed(context.Background(), tsKeyCid.Bytes()) - require.NoError(t, err) - require.True(t, seen, "tipset key should be seen") - - seen, err = ei.IsTipsetProcessed(context.Background(), []byte{1}) - require.NoError(t, err) - require.False(t, seen, "tipset key should not be seen") - - _ = <-out - - testCases := []struct { - name string - filter *eventFilter - te *TipSetEvents - want []*CollectedEvent - }{ - { - name: "nomatch tipset min height", - filter: &eventFilter{ - minHeight: 14001, - maxHeight: -1, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch tipset max height", - filter: &eventFilter{ - minHeight: -1, - maxHeight: 13999, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match tipset min height", - filter: &eventFilter{ - minHeight: 14000, - maxHeight: -1, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: cid14000, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a2}, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a1}, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry with alternate values", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - []byte("approval"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry by missing value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry by missing key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "method": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry with one mismatching key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "approver": { - []byte("addr1"), - }, - }), - }, - te: 
events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one mismatching value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr2"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988181"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - } - - for _, tc := range testCases { - tc := tc // appease lint - t.Run(tc.name, func(t *testing.T) { - if err := ei.prefillFilter(context.Background(), tc.filter, false); err != nil { - require.NoError(t, err, "prefill filter events") - } - - coll := tc.filter.TakeCollectedEvents(context.Background()) - require.ElementsMatch(t, coll, tc.want) - }) - } -} - -func TestEventIndexPrefillFilterExcludeReverted(t *testing.T) { - rng := pseudo.New(pseudo.NewSource(299792458)) - a1 := randomF4Addr(t, rng) - a2 := randomF4Addr(t, rng) - a3 := randomF4Addr(t, rng) - - a1ID := abi.ActorID(1) - a2ID := abi.ActorID(2) - - addrMap := addressMap{} - addrMap.add(a1ID, a1) - addrMap.add(a2ID, a2) - - ev1 := fakeEvent( - a1ID, - []kv{ - {k: "type", v: []byte("approval")}, - {k: "signer", v: []byte("addr1")}, - }, - []kv{ - {k: "amount", v: []byte("2988181")}, - }, - ) - ev2 := fakeEvent( - a2ID, - []kv{ - {k: "type", v: []byte("approval")}, - {k: "signer", v: []byte("addr2")}, - }, - []kv{ - {k: "amount", v: []byte("2988182")}, - }, - ) - - st := newStore() - events := []*types.Event{ev1} - revertedEvents := []*types.Event{ev2} - em := executedMessage{ - msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), - rct: fakeReceipt(t, rng, st, events), - evs: events, - } - revertedEm := executedMessage{ - msg: fakeMessage(randomF4Addr(t, rng), randomF4Addr(t, rng)), - rct: fakeReceipt(t, rng, st, revertedEvents), - evs: revertedEvents, - } - - events14000 := buildTipSetEvents(t, rng, 14000, em) - revertedEvents14000 := buildTipSetEvents(t, rng, 14000, revertedEm) - cid14000, err := events14000.msgTs.Key().Cid() - require.NoError(t, err, "tipset cid") - reveredCID14000, err := revertedEvents14000.msgTs.Key().Cid() - require.NoError(t, err, "tipset cid") - - noCollectedEvents := []*CollectedEvent{} - oneCollectedEvent := []*CollectedEvent{ - { - Entries: ev1.Entries, - EmitterAddr: a1, - EventIdx: 0, - Reverted: false, - Height: 14000, - TipSetKey: events14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: em.msg.Cid(), - }, - } - twoCollectedEvent := []*CollectedEvent{ - { - Entries: ev1.Entries, - EmitterAddr: a1, - EventIdx: 0, - Reverted: false, - Height: 14000, - TipSetKey: events14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: em.msg.Cid(), - }, - { - Entries: ev2.Entries, - EmitterAddr: a2, - EventIdx: 0, - Reverted: true, - Height: 14000, - TipSetKey: revertedEvents14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: revertedEm.msg.Cid(), - }, - } - oneCollectedRevertedEvent := []*CollectedEvent{ - { - Entries: ev2.Entries, - EmitterAddr: a2, - EventIdx: 0, - Reverted: true, - Height: 14000, - TipSetKey: revertedEvents14000.msgTs.Key(), - MsgIdx: 0, - MsgCid: revertedEm.msg.Cid(), - }, - } - - workDir, err := os.MkdirTemp("", "lotusevents") - require.NoError(t, err, "create temporary work directory") - - defer func() { - _ = os.RemoveAll(workDir) - }() - t.Logf("using work dir %q", workDir) - - dbPath := 
filepath.Join(workDir, "actorevents.db") - - ei, err := NewEventIndex(context.Background(), dbPath, nil) - require.NoError(t, err, "create event index") - - tCh := make(chan EventIndexUpdated, 3) - subCh, unSubscribe := ei.SubscribeUpdates() - defer unSubscribe() - go func() { - cnt := 0 - for tu := range subCh { - tCh <- tu - cnt++ - if cnt == 3 { - close(tCh) - return - } - } - }() - - if err := ei.CollectEvents(context.Background(), revertedEvents14000, false, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "collect reverted events") - } - if err := ei.CollectEvents(context.Background(), revertedEvents14000, true, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "revert reverted events") - } - if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil { - require.NoError(t, err, "collect events") - } - - _ = <-tCh - _ = <-tCh - _ = <-tCh - - inclusiveTestCases := []struct { - name string - filter *eventFilter - te *TipSetEvents - want []*CollectedEvent - }{ - { - name: "nomatch tipset min height", - filter: &eventFilter{ - minHeight: 14001, - maxHeight: -1, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch tipset max height", - filter: &eventFilter{ - minHeight: -1, - maxHeight: 13999, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match tipset min height", - filter: &eventFilter{ - minHeight: 14000, - maxHeight: -1, - }, - te: events14000, - want: twoCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: cid14000, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: reveredCID14000, - }, - te: revertedEvents14000, - want: oneCollectedRevertedEvent, - }, - { - name: "nomatch address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a3}, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match address 2", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a2}, - }, - te: revertedEvents14000, - want: oneCollectedRevertedEvent, - }, - { - name: "match address 1", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a1}, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: twoCollectedEvent, - }, - { - name: "match one entry with alternate values", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - []byte("approval"), - }, - }), - }, - te: events14000, - want: twoCollectedEvent, - }, - { - name: "nomatch one entry by missing value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry by missing key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "method": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - 
}, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr2"), - }, - }), - }, - te: revertedEvents14000, - want: oneCollectedRevertedEvent, - }, - { - name: "nomatch one entry with one mismatching key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "approver": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one mismatching value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr3"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988181"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988182"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - } - - exclusiveTestCases := []struct { - name string - filter *eventFilter - te *TipSetEvents - want []*CollectedEvent - }{ - { - name: "nomatch tipset min height", - filter: &eventFilter{ - minHeight: 14001, - maxHeight: -1, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch tipset max height", - filter: &eventFilter{ - minHeight: -1, - maxHeight: 13999, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match tipset min height", - filter: &eventFilter{ - minHeight: 14000, - maxHeight: -1, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: cid14000, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match tipset cid but reverted", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - tipsetCid: reveredCID14000, - }, - te: revertedEvents14000, - want: noCollectedEvents, - }, - { - name: "nomatch address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a3}, - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch address 2 but reverted", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a2}, - }, - te: revertedEvents14000, - want: noCollectedEvents, - }, - { - name: "match address", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - addresses: []address.Address{a1}, - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "match one entry", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - }), - }, - te: events14000, - 
want: oneCollectedEvent, - }, - { - name: "match one entry with alternate values", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - []byte("approval"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry by missing value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("cancel"), - []byte("propose"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry by missing key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "method": { - []byte("approval"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "match one entry with multiple keys", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: oneCollectedEvent, - }, - { - name: "nomatch one entry with one mismatching key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "approver": { - []byte("addr1"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with matching reverted value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr2"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one mismatching value", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "type": { - []byte("approval"), - }, - "signer": { - []byte("addr3"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - { - name: "nomatch one entry with one unindexed key", - filter: &eventFilter{ - minHeight: -1, - maxHeight: -1, - keysWithCodec: keysToKeysWithCodec(map[string][][]byte{ - "amount": { - []byte("2988181"), - }, - }), - }, - te: events14000, - want: noCollectedEvents, - }, - } - - for _, tc := range inclusiveTestCases { - tc := tc // appease lint - t.Run(tc.name, func(t *testing.T) { - if err := ei.prefillFilter(context.Background(), tc.filter, false); err != nil { - require.NoError(t, err, "prefill filter events") - } - - coll := tc.filter.TakeCollectedEvents(context.Background()) - require.ElementsMatch(t, coll, tc.want, tc.name) - }) - } - - for _, tc := range exclusiveTestCases { - tc := tc // appease lint - t.Run(tc.name, func(t *testing.T) { - if err := ei.prefillFilter(context.Background(), tc.filter, true); err != nil { - require.NoError(t, err, "prefill filter events") - } - - coll := tc.filter.TakeCollectedEvents(context.Background()) - require.ElementsMatch(t, coll, tc.want, tc.name) - }) - } -} - -// TestQueryPlan is to ensure that future modifications to the db schema, or future upgrades to -// sqlite, do not change the query plan of the prepared statements used by the event index such that -// queries hit undesirable indexes which are likely to slow down the query. 
-// Changes that break this test need to be sure that the query plan is still efficient for the -// expected query patterns. -func TestQueryPlan(t *testing.T) { - ei, err := NewEventIndex(context.Background(), filepath.Join(t.TempDir(), "actorevents.db"), nil) - require.NoError(t, err, "create event index") - - verifyQueryPlan := func(stmt string) { - rows, err := ei.db.Query("EXPLAIN QUERY PLAN " + strings.Replace(stmt, "?", "1", -1)) - require.NoError(t, err, "explain query plan for query: "+stmt) - defer func() { - require.NoError(t, rows.Close()) - }() - // First response to EXPLAIN QUERY PLAN should show us the use of an index that we want to - // encounter first to narrow down the search space - either a height or tipset_key_cid index - // - sqlite_autoindex_events_seen_1 is for the UNIQUE constraint on events_seen - // - events_seen_height and events_seen_tipset_key_cid are explicit indexes on events_seen - // - event_height and event_tipset_key_cid are explicit indexes on event - rows.Next() - var id, parent, notused, detail string - require.NoError(t, rows.Scan(&id, &parent, ¬used, &detail), "scan explain query plan for query: "+stmt) - detail = strings.TrimSpace(detail) - var expectedIndexes = []string{ - "sqlite_autoindex_events_seen_1", - "events_seen_height", - "events_seen_tipset_key_cid", - "event_height", - "event_tipset_key_cid", - } - indexUsed := false - for _, index := range expectedIndexes { - if strings.Contains(detail, " INDEX "+index) { - indexUsed = true - break - } - } - require.True(t, indexUsed, "index used for query: "+stmt+" detail: "+detail) - - stmt = regexp.MustCompile(`(?m)^\s+`).ReplaceAllString(stmt, " ") // remove all leading whitespace from the statement - stmt = strings.Replace(stmt, "\n", "", -1) // remove all newlines from the statement - t.Logf("[%s] has plan start: %s", stmt, detail) - } - - // Test the hard-coded select and update queries - stmtMap := preparedStatementMapping(&preparedStatements{}) - for _, stmt := range stmtMap { - if strings.HasPrefix(strings.TrimSpace(strings.ToLower(stmt)), "insert") { - continue - } - verifyQueryPlan(stmt) - } - - // Test the dynamic prefillFilter queries - prefillCases := []*eventFilter{ - {}, - {minHeight: 14000, maxHeight: 14000}, - {minHeight: 14000, maxHeight: 15000}, - {tipsetCid: cid.MustParse("bafkqaaa")}, - {minHeight: 14000, maxHeight: 14000, addresses: []address.Address{address.TestAddress}}, - {minHeight: 14000, maxHeight: 15000, addresses: []address.Address{address.TestAddress}}, - {tipsetCid: cid.MustParse("bafkqaaa"), addresses: []address.Address{address.TestAddress}}, - {minHeight: 14000, maxHeight: 14000, addresses: []address.Address{address.TestAddress, address.TestAddress}}, - {minHeight: 14000, maxHeight: 15000, addresses: []address.Address{address.TestAddress, address.TestAddress}}, - {tipsetCid: cid.MustParse("bafkqaaa"), addresses: []address.Address{address.TestAddress, address.TestAddress}}, - {minHeight: 14000, maxHeight: 14000, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}})}, - {minHeight: 14000, maxHeight: 15000, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}})}, - {tipsetCid: cid.MustParse("bafkqaaa"), keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}})}, - {minHeight: 14000, maxHeight: 14000, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {minHeight: 14000, maxHeight: 15000, keysWithCodec: 
keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {tipsetCid: cid.MustParse("bafkqaaa"), keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {minHeight: 14000, maxHeight: 14000, addresses: []address.Address{address.TestAddress, address.TestAddress}, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {minHeight: 14000, maxHeight: 15000, addresses: []address.Address{address.TestAddress, address.TestAddress}, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - {tipsetCid: cid.MustParse("bafkqaaa"), addresses: []address.Address{address.TestAddress, address.TestAddress}, keysWithCodec: keysToKeysWithCodec(map[string][][]byte{"type": {[]byte("approval")}, "signer": {[]byte("addr1")}})}, - } - for _, filter := range prefillCases { - _, query := makePrefillFilterQuery(filter, true) - verifyQueryPlan(query) - _, query = makePrefillFilterQuery(filter, false) - verifyQueryPlan(query) - } -} diff --git a/chain/events/observer.go b/chain/events/observer.go index 0b021f9965b..896440eacbc 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -157,13 +157,14 @@ func (o *observer) applyChanges(ctx context.Context, changes []*api.HeadChange) } func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) error { + o.lk.Lock() + defer o.lk.Unlock() + ctx, span := trace.StartSpan(ctx, "events.HeadChange") span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev)))) span.AddAttributes(trace.Int64Attribute("applies", int64(len(app)))) - o.lk.Lock() head := o.head - o.lk.Unlock() defer func() { span.AddAttributes(trace.Int64Attribute("endHeight", int64(head.Height()))) @@ -199,14 +200,12 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err // 1. We need to get the observers every time in case some registered/deregistered. // 2. We need to atomically set the head so new observers don't see events twice or // skip them. - o.lk.Lock() - observers := o.observers + o.head = to - o.lk.Unlock() - for _, obs := range observers { + for _, obs := range o.observers { if err := obs.Revert(ctx, from, to); err != nil { - log.Errorf("observer %T failed to apply tipset %s (%d) with: %s", obs, from.Key(), from.Height(), err) + log.Errorf("observer %T failed to revert tipset %s (%d) with: %s", obs, from.Key(), from.Height(), err) } } @@ -225,14 +224,11 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err ) } - o.lk.Lock() - observers := o.observers o.head = to - o.lk.Unlock() - for _, obs := range observers { + for _, obs := range o.observers { if err := obs.Apply(ctx, head, to); err != nil { - log.Errorf("observer %T failed to revert tipset %s (%d) with: %s", obs, to.Key(), to.Height(), err) + log.Errorf("observer %T failed to apply tipset %s (%d) with: %s", obs, to.Key(), to.Height(), err) } } if to.Height() > o.maxHeight { @@ -244,6 +240,41 @@ func (o *observer) headChange(ctx context.Context, rev, app []*types.TipSet) err return nil } +// ObserveAndBlock registers the observer and returns the current tipset along with an unlock function. +// +// This method guarantees that the observer will receive tipset updates starting from the returned tipset. +// It blocks all tipset updates for all clients until the returned unlock function is called. 
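+//
+// A sketch of the usage pattern described below (obs and initializeFrom are
+// illustrative stand-ins, not part of this patch):
+//
+//	ts, unlock, err := o.ObserveAndBlock(obs)
+//	if err != nil {
+//		return err // observer has not been started
+//	}
+//	initializeFrom(ts) // set up state from the guaranteed-current tipset
+//	unlock()           // tipset updates now resume for all observers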
+// +// The typical usage pattern is: +// 1. Call ObserveAndBlock to register the observer +// 2. Perform any necessary initialization using the returned current tipset +// 3. Call the unlock function to start receiving updates +// +// Important notes: +// - This method should only be called after the observer has been started +// - The unlock function must be called to prevent blocking of tipset updates for all registered observers +// - This method returns an error if the observer hasn't started yet +// +// Returns: +// - *types.TipSet: The current tipset at the time of registration +// - func(): An unlock function that must be called to start receiving updates +// - error: An error if the observer hasn't started yet +func (o *observer) ObserveAndBlock(obs TipSetObserver) (*types.TipSet, func(), error) { + o.lk.Lock() + currentHead := o.head + if currentHead == nil { + o.lk.Unlock() + return nil, func() {}, xerrors.New("observer not started") + } + + o.observers = append(o.observers, obs) + unlockHandle := func() { + o.lk.Unlock() + } + + return currentHead, unlockHandle, nil +} + // Observe registers the observer, and returns the current tipset. The observer is guaranteed to // observe events starting at this tipset. // diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 435d942dc18..b05a14a5d55 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -35,7 +35,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/proofs" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/rand" @@ -258,7 +257,7 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS //return nil, xerrors.Errorf("creating drand beacon: %w", err) //} - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), sys, us, beac, ds, nil) if err != nil { return nil, xerrors.Errorf("initing stmgr: %w", err) } diff --git a/chain/index/api.go b/chain/index/api.go new file mode 100644 index 00000000000..9588383ea1f --- /dev/null +++ b/chain/index/api.go @@ -0,0 +1,337 @@ +package index + +import ( + "context" + "database/sql" + "errors" + + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/types" +) + +var ErrChainForked = xerrors.New("chain forked") + +func (si *SqliteIndexer) ChainValidateIndex(ctx context.Context, epoch abi.ChainEpoch, backfill bool) (*types.IndexValidation, error) { + // return an error if the indexer is not started + if !si.started { + return nil, errors.New("ChainValidateIndex called before indexer start") + } + + // return an error if the indexer is closed + if si.isClosed() { + return nil, errors.New("ChainValidateIndex called on closed indexer") + } + + // this API only works for epoch < head because of deferred execution in Filecoin + head := si.cs.GetHeaviestTipSet() + if epoch >= head.Height() { + return nil, xerrors.Errorf("cannot validate index at epoch %d, can only validate at an epoch less than chain head epoch %d", epoch, head.Height()) + } + + // fetch the tipset at the given epoch on the canonical chain + 
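// (GetTipsetByHeight is called with prev=true, so for a null round it
+	// returns the nearest preceding non-null tipset; the height-mismatch
+	// check below relies on this to detect null rounds)
+	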
expectedTs, err := si.cs.GetTipsetByHeight(ctx, epoch, head, true) + if err != nil { + return nil, xerrors.Errorf("failed to get tipset at height %d: %w", epoch, err) + } + + // we need to take a write lock here so that back-filling does not race with real-time chain indexing + if backfill { + si.writerLk.Lock() + defer si.writerLk.Unlock() + } + + var isIndexEmpty bool + if err := si.stmts.isIndexEmptyStmt.QueryRowContext(ctx).Scan(&isIndexEmpty); err != nil { + return nil, xerrors.Errorf("failed to check if index is empty: %w", err) + } + + // Canonical chain has a null round at the epoch -> return if index is empty otherwise validate that index also + // has a null round at this epoch i.e. it does not have anything indexed at all for this epoch + if expectedTs.Height() != epoch { + if isIndexEmpty { + return &types.IndexValidation{ + Height: epoch, + IsNullRound: true, + }, nil + } + // validate the db has a hole here and error if not, we don't attempt to repair because something must be very wrong for this to fail + return si.validateIsNullRound(ctx, epoch) + } + + // if the index is empty -> short-circuit and simply backfill if applicable + if isIndexEmpty { + if !backfill { + return nil, makeBackfillRequiredErr(epoch) + } + return si.backfillMissingTipset(ctx, expectedTs) + } + // see if the tipset at this epoch is already indexed or if we need to backfill + revertedCount, nonRevertedCount, err := si.getTipsetCountsAtHeight(ctx, epoch) + if err != nil { + if err == sql.ErrNoRows { + if !backfill { + return nil, makeBackfillRequiredErr(epoch) + } + return si.backfillMissingTipset(ctx, expectedTs) + } + return nil, xerrors.Errorf("failed to get tipset counts at height %d: %w", epoch, err) + } + + switch { + case revertedCount == 0 && nonRevertedCount == 0: + // no tipsets at this epoch in the index, backfill + if !backfill { + return nil, makeBackfillRequiredErr(epoch) + } + return si.backfillMissingTipset(ctx, expectedTs) + + case revertedCount > 0 && nonRevertedCount == 0: + return nil, xerrors.Errorf("index corruption: height %d only has reverted tipsets", epoch) + + case nonRevertedCount > 1: + return nil, xerrors.Errorf("index corruption: height %d has multiple non-reverted tipsets", epoch) + } + + // fetch the non-reverted tipset at this epoch + var indexedTsKeyCidBytes []byte + err = si.stmts.getNonRevertedTipsetAtHeightStmt.QueryRowContext(ctx, epoch).Scan(&indexedTsKeyCidBytes) + if err != nil { + return nil, xerrors.Errorf("failed to get non-reverted tipset at height %d: %w", epoch, err) + } + + indexedTsKeyCid, err := cid.Cast(indexedTsKeyCidBytes) + if err != nil { + return nil, xerrors.Errorf("failed to cast tipset key cid: %w", err) + } + expectedTsKeyCid, err := expectedTs.Key().Cid() + if err != nil { + return nil, xerrors.Errorf("failed to get tipset key cid: %w", err) + } + if !indexedTsKeyCid.Equals(expectedTsKeyCid) { + return nil, xerrors.Errorf("index corruption: indexed tipset at height %d has key %s, but canonical chain has %s", epoch, indexedTsKeyCid, expectedTsKeyCid) + } + + getAndVerifyIndexedData := func() (*indexedTipSetData, error) { + indexedData, err := si.getIndexedTipSetData(ctx, expectedTs) + if err != nil { + return nil, xerrors.Errorf("failed to get indexed data for tipset at height %d: %w", expectedTs.Height(), err) + } + if indexedData == nil { + return nil, xerrors.Errorf("nil indexed data for tipset at height %d", expectedTs.Height()) + } + if err = si.verifyIndexedData(ctx, expectedTs, indexedData); err != nil { + return nil, err + } + return 
indexedData, nil + } + + indexedData, err := getAndVerifyIndexedData() + var bf bool + if err != nil { + if !backfill { + return nil, xerrors.Errorf("failed to verify indexed data at height %d: %w", expectedTs.Height(), err) + } + + log.Warnf("failed to verify indexed data at height %d; err:%s; backfilling once and validating again", expectedTs.Height(), err) + if _, err := si.backfillMissingTipset(ctx, expectedTs); err != nil { + return nil, xerrors.Errorf("failed to backfill missing tipset at height %d during validation; err: %w", expectedTs.Height(), err) + } + + indexedData, err = getAndVerifyIndexedData() + if err != nil { + return nil, xerrors.Errorf("failed to verify indexed data at height %d after backfill: %w", expectedTs.Height(), err) + } + bf = true + } + + return &types.IndexValidation{ + TipSetKey: expectedTs.Key(), + Height: expectedTs.Height(), + IndexedMessagesCount: indexedData.nonRevertedMessageCount, + IndexedEventsCount: indexedData.nonRevertedEventCount, + IndexedEventEntriesCount: indexedData.nonRevertedEventEntriesCount, + Backfilled: bf, + }, nil +} + +func (si *SqliteIndexer) validateIsNullRound(ctx context.Context, epoch abi.ChainEpoch) (*types.IndexValidation, error) { + // make sure we do not have tipset(reverted or non-reverted) indexed at this epoch + var isNullRound bool + err := si.stmts.hasNullRoundAtHeightStmt.QueryRowContext(ctx, epoch).Scan(&isNullRound) + if err != nil { + return nil, xerrors.Errorf("failed to check if null round exists at height %d: %w", epoch, err) + } + if !isNullRound { + return nil, xerrors.Errorf("index corruption: height %d should be a null round but is not", epoch) + } + + return &types.IndexValidation{ + Height: epoch, + IsNullRound: true, + }, nil +} + +func (si *SqliteIndexer) getTipsetCountsAtHeight(ctx context.Context, height abi.ChainEpoch) (revertedCount, nonRevertedCount int, err error) { + err = si.stmts.countTipsetsAtHeightStmt.QueryRowContext(ctx, height).Scan(&revertedCount, &nonRevertedCount) + if err != nil { + if err == sql.ErrNoRows { + // No tipsets found at this height + return 0, 0, nil + } + return 0, 0, xerrors.Errorf("failed to query tipset counts at height %d: %w", height, err) + } + + return revertedCount, nonRevertedCount, nil +} + +type indexedTipSetData struct { + nonRevertedMessageCount uint64 + nonRevertedEventCount uint64 + nonRevertedEventEntriesCount uint64 +} + +// getIndexedTipSetData fetches the indexed tipset data for a tipset +func (si *SqliteIndexer) getIndexedTipSetData(ctx context.Context, ts *types.TipSet) (*indexedTipSetData, error) { + tsKeyCidBytes, err := toTipsetKeyCidBytes(ts) + if err != nil { + return nil, xerrors.Errorf("failed to get tipset key cid: %w", err) + } + + var data indexedTipSetData + err = withTx(ctx, si.db, func(tx *sql.Tx) error { + if err = tx.Stmt(si.stmts.getNonRevertedTipsetMessageCountStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&data.nonRevertedMessageCount); err != nil { + return xerrors.Errorf("failed to query non reverted message count: %w", err) + } + + if err = tx.Stmt(si.stmts.getNonRevertedTipsetEventCountStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&data.nonRevertedEventCount); err != nil { + return xerrors.Errorf("failed to query non reverted event count: %w", err) + } + + if err = tx.Stmt(si.stmts.getNonRevertedTipsetEventEntriesCountStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&data.nonRevertedEventEntriesCount); err != nil { + return xerrors.Errorf("failed to query non reverted event entries count: %w", err) + } + + return nil + }) + + 
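// data now holds the non-reverted message, event and event-entry counts,
+	// read in a single transaction so the three counts are mutually consistent
+	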
return &data, err +} + +// verifyIndexedData verifies that the indexed data for a tipset is correct +// by comparing the number of messages and events in the chainstore to the number of messages and events indexed. +// +// Notes: +// +// - Events are loaded from the executed messages of the tipset at the next epoch (ts.Height() + 1). +// - This is not a comprehensive verification because we only compare counts, assuming that a match +// means that the entries are correct. A future iteration may compare message and event details to +// confirm that they are what is expected. +func (si *SqliteIndexer) verifyIndexedData(ctx context.Context, ts *types.TipSet, indexedData *indexedTipSetData) (err error) { + tsKeyCid, err := ts.Key().Cid() + if err != nil { + return xerrors.Errorf("failed to get tipset key cid at height %d: %w", ts.Height(), err) + } + + executionTs, err := si.getNextTipset(ctx, ts) + if err != nil { + return xerrors.Errorf("failed to get next tipset for height %d: %w", ts.Height(), err) + } + + // given that `ts` is on the canonical chain and `executionTs` is the next tipset in the chain + // `ts` can not have reverted events + var hasRevertedEventsInTipset bool + err = si.stmts.hasRevertedEventsInTipsetStmt.QueryRowContext(ctx, tsKeyCid.Bytes()).Scan(&hasRevertedEventsInTipset) + if err != nil { + return xerrors.Errorf("failed to check if there are reverted events in tipset for height %d: %w", ts.Height(), err) + } + if hasRevertedEventsInTipset { + return xerrors.Errorf("index corruption: reverted events found for an executed tipset %s at height %d", tsKeyCid, ts.Height()) + } + + executedMsgs, err := si.executedMessagesLoaderFunc(ctx, si.cs, ts, executionTs) + if err != nil { + return xerrors.Errorf("failed to load executed messages for height %d: %w", ts.Height(), err) + } + + var ( + totalEventsCount = uint64(0) + totalEventEntriesCount = uint64(0) + ) + for _, emsg := range executedMsgs { + totalEventsCount += uint64(len(emsg.evs)) + for _, ev := range emsg.evs { + totalEventEntriesCount += uint64(len(ev.Entries)) + } + } + + if totalEventsCount != indexedData.nonRevertedEventCount { + return xerrors.Errorf("event count mismatch for height %d: chainstore has %d, index has %d", ts.Height(), totalEventsCount, indexedData.nonRevertedEventCount) + } + + totalExecutedMsgCount := uint64(len(executedMsgs)) + if totalExecutedMsgCount != indexedData.nonRevertedMessageCount { + return xerrors.Errorf("message count mismatch for height %d: chainstore has %d, index has %d", ts.Height(), totalExecutedMsgCount, indexedData.nonRevertedMessageCount) + } + + if indexedData.nonRevertedEventEntriesCount != totalEventEntriesCount { + return xerrors.Errorf("event entries count mismatch for height %d: chainstore has %d, index has %d", ts.Height(), totalEventEntriesCount, indexedData.nonRevertedEventEntriesCount) + } + + return nil +} + +func (si *SqliteIndexer) backfillMissingTipset(ctx context.Context, ts *types.TipSet) (*types.IndexValidation, error) { + executionTs, err := si.getNextTipset(ctx, ts) + if err != nil { + return nil, xerrors.Errorf("failed to get next tipset at height %d: %w", ts.Height(), err) + } + + backfillFunc := func() error { + return withTx(ctx, si.db, func(tx *sql.Tx) error { + return si.indexTipsetWithParentEvents(ctx, tx, ts, executionTs) + }) + } + + if err := backfillFunc(); err != nil { + if ipld.IsNotFound(err) { + return nil, xerrors.Errorf("failed to backfill tipset at epoch %d: chain store does not contain data: %w", ts.Height(), err) + } + return nil, 
xerrors.Errorf("failed to backfill tipset at epoch %d; err: %w", ts.Height(), err) + } + + indexedData, err := si.getIndexedTipSetData(ctx, ts) + if err != nil { + return nil, xerrors.Errorf("failed to get indexed tipset data: %w", err) + } + + return &types.IndexValidation{ + TipSetKey: ts.Key(), + Height: ts.Height(), + Backfilled: true, + IndexedMessagesCount: indexedData.nonRevertedMessageCount, + IndexedEventsCount: indexedData.nonRevertedEventCount, + IndexedEventEntriesCount: indexedData.nonRevertedEventEntriesCount, + }, nil +} + +func (si *SqliteIndexer) getNextTipset(ctx context.Context, ts *types.TipSet) (*types.TipSet, error) { + nextEpochTs, err := si.cs.GetTipsetByHeight(ctx, ts.Height()+1, nil, false) + if err != nil { + return nil, xerrors.Errorf("failed to get tipset at height %d: %w", ts.Height()+1, err) + } + + if nextEpochTs.Parents() != ts.Key() { + return nil, xerrors.Errorf("chain forked at height %d; please retry your request; err: %w", ts.Height(), ErrChainForked) + } + + return nextEpochTs, nil +} + +func makeBackfillRequiredErr(height abi.ChainEpoch) error { + return xerrors.Errorf("missing tipset at height %d in the chain index, set backfill flag to true to fix", height) +} diff --git a/chain/index/api_test.go b/chain/index/api_test.go new file mode 100644 index 00000000000..f8c7085cd20 --- /dev/null +++ b/chain/index/api_test.go @@ -0,0 +1,499 @@ +package index + +import ( + "context" + pseudo "math/rand" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/types" +) + +func TestValidateIsNullRoundSimple(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + tests := []struct { + name string + epoch abi.ChainEpoch + setupFunc func(*SqliteIndexer) + expectedResult bool + expectError bool + errorContains string + }{ + { + name: "happy path - null round", + epoch: 50, + expectedResult: true, + }, + { + name: "failure - non-null round", + epoch: 50, + setupFunc: func(si *SqliteIndexer) { + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: randomCid(t, rng).Bytes(), + height: 50, + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + }, + expectError: true, + errorContains: "index corruption", + }, + { + name: "edge case - epoch 0", + epoch: 0, + expectedResult: true, + }, + { + name: "edge case - epoch above head", + epoch: headHeight + 1, + expectedResult: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + si, _, _ := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + + if tt.setupFunc != nil { + tt.setupFunc(si) + } + + res, err := si.validateIsNullRound(ctx, tt.epoch) + + if tt.expectError { + require.Error(t, err) + if tt.errorContains != "" { + require.ErrorContains(t, err, tt.errorContains) + } + } else { + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, tt.expectedResult, res.IsNullRound) + require.Equal(t, tt.epoch, res.Height) + } + }) + } +} + +func TestFailureHeadHeight(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + si, head, _ := setupWithHeadIndexed(t, headHeight, rng) + 
t.Cleanup(func() { _ = si.Close() }) + si.Start() + + _, err := si.ChainValidateIndex(ctx, head.Height(), false) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot validate index at epoch") +} + +func TestBackfillNullRound(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + si.Start() + + nullRoundEpoch := abi.ChainEpoch(50) + nonNullRoundEpoch := abi.ChainEpoch(51) + + // Create a tipset with a height different from the requested epoch + nonNullTs := fakeTipSet(t, rng, nonNullRoundEpoch, []cid.Cid{}) + + // Set up the chainstore to return the non-null tipset for the null round epoch + cs.SetTipsetByHeightAndKey(nullRoundEpoch, nonNullTs.Key(), nonNullTs) + + // Attempt to validate the null round epoch + result, err := si.ChainValidateIndex(ctx, nullRoundEpoch, true) + require.NoError(t, err) + require.NotNil(t, result) + require.False(t, result.Backfilled) + require.True(t, result.IsNullRound) +} + +func TestBackfillReturnsError(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + si.Start() + + missingEpoch := abi.ChainEpoch(50) + + // Create a tipset for the missing epoch, but don't index it + missingTs := fakeTipSet(t, rng, missingEpoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(missingEpoch, missingTs.Key(), missingTs) + + // Attempt to validate the missing epoch with backfill flag set to false + _, err := si.ChainValidateIndex(ctx, missingEpoch, false) + require.Error(t, err) + require.ErrorContains(t, err, "missing tipset at height 50 in the chain index") +} + +func TestBackfillMissingEpoch(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + si.Start() + + // Initialize address resolver + si.SetActorToDelegatedAddresFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { + idAddr, err := address.NewIDAddress(uint64(emitter)) + if err != nil { + return address.Undef, false + } + return idAddr, true + }) + + missingEpoch := abi.ChainEpoch(50) + + parentTs := fakeTipSet(t, rng, missingEpoch-1, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(missingEpoch-1, parentTs.Key(), parentTs) + + missingTs := fakeTipSet(t, rng, missingEpoch, parentTs.Cids()) + cs.SetTipsetByHeightAndKey(missingEpoch, missingTs.Key(), missingTs) + + executionTs := fakeTipSet(t, rng, missingEpoch+1, missingTs.Key().Cids()) + cs.SetTipsetByHeightAndKey(missingEpoch+1, executionTs.Key(), executionTs) + + // Create fake messages and events + fakeMsg := fakeMessage(randomIDAddr(t, rng), randomIDAddr(t, rng)) + fakeEvent := fakeEvent(1, []kv{{k: "test", v: []byte("value")}, {k: "test2", v: []byte("value2")}}, nil) + + executedMsg := executedMessage{ + msg: fakeMsg, + evs: []types.Event{*fakeEvent}, + } + + cs.SetMessagesForTipset(missingTs, []types.ChainMsg{fakeMsg}) + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, 
error) { + if msgTs.Height() == missingTs.Height() { + return []executedMessage{executedMsg}, nil + } + return nil, nil + }) + + // Attempt to validate and backfill the missing epoch + result, err := si.ChainValidateIndex(ctx, missingEpoch, true) + require.NoError(t, err) + require.NotNil(t, result) + require.True(t, result.Backfilled) + require.EqualValues(t, missingEpoch, result.Height) + require.Equal(t, uint64(1), result.IndexedMessagesCount) + require.Equal(t, uint64(1), result.IndexedEventsCount) + require.Equal(t, uint64(2), result.IndexedEventEntriesCount) + + // Verify that the epoch is now indexed + verificationResult, err := si.ChainValidateIndex(ctx, missingEpoch, false) + require.NoError(t, err) + require.NotNil(t, verificationResult) + require.False(t, verificationResult.Backfilled) + require.Equal(t, result.IndexedMessagesCount, verificationResult.IndexedMessagesCount) + require.Equal(t, result.IndexedEventsCount, verificationResult.IndexedEventsCount) + require.Equal(t, result.IndexedEventEntriesCount, verificationResult.IndexedEventEntriesCount) +} + +func TestIndexCorruption(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(100) + + tests := []struct { + name string + setupFunc func(*testing.T, *SqliteIndexer, *dummyChainStore) + epoch abi.ChainEpoch + errorContains string + }{ + { + name: "only reverted tipsets", + setupFunc: func(t *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts.Key(), ts) + keyBz, err := ts.Key().Cid() + require.NoError(t, err) + + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: true, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + }, + epoch: 50, + errorContains: "index corruption: height 50 only has reverted tipsets", + }, + { + name: "multiple non-reverted tipsets", + setupFunc: func(t *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts1 := fakeTipSet(t, rng, epoch, []cid.Cid{}) + ts2 := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts1.Key(), ts1) + + t1Bz, err := toTipsetKeyCidBytes(ts1) + require.NoError(t, err) + t2Bz, err := toTipsetKeyCidBytes(ts2) + require.NoError(t, err) + + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: t1Bz, + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: t2Bz, + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + }, + epoch: 50, + errorContains: "index corruption: height 50 has multiple non-reverted tipsets", + }, + { + name: "tipset key mismatch", + setupFunc: func(_ *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts1 := fakeTipSet(t, rng, epoch, []cid.Cid{}) + ts2 := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts1.Key(), ts1) + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: ts2.Key().Cids()[0].Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + }, + epoch: 50, + errorContains: "index corruption: indexed tipset at height 50 has key", + }, + { + name: "reverted events for executed tipset", + 
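// a canonical tipset with an executed successor must never retain
+			// reverted events in the index
+			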
setupFunc: func(_ *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts.Key(), ts) + keyBz, err := ts.Key().Cid() + require.NoError(t, err) + + messageID := insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + insertEvent(t, si, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 1, + emitterAddr: randomIDAddr(t, rng).Bytes(), + reverted: true, + }) + cs.SetTipsetByHeightAndKey(epoch+1, fakeTipSet(t, rng, epoch+1, ts.Key().Cids()).Key(), fakeTipSet(t, rng, epoch+1, ts.Key().Cids())) + }, + epoch: 50, + errorContains: "index corruption: reverted events found for an executed tipset", + }, + { + name: "message count mismatch", + setupFunc: func(_ *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts.Key(), ts) + keyBz, err := ts.Key().Cid() + require.NoError(t, err) + + // Insert two messages in the index + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 1, + }) + + // Setup dummy event loader + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return []executedMessage{{msg: fakeMessage(randomIDAddr(t, rng), randomIDAddr(t, rng))}}, nil + }) + + // Set up the next tipset for event execution + nextTs := fakeTipSet(t, rng, epoch+1, ts.Key().Cids()) + cs.SetTipsetByHeightAndKey(epoch+1, nextTs.Key(), nextTs) + }, + epoch: 50, + errorContains: "failed to verify indexed data at height 50: message count mismatch for height 50: chainstore has 1, index has 2", + }, + { + name: "event count mismatch", + setupFunc: func(_ *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts.Key(), ts) + keyBz, err := ts.Key().Cid() + require.NoError(t, err) + + // Insert one message in the index + messageID := insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + + // Insert two events for the message + insertEvent(t, si, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 2, + emitterAddr: randomIDAddr(t, rng).Bytes(), + reverted: false, + }) + insertEvent(t, si, event{ + messageID: messageID, + eventIndex: 1, + emitterId: 3, + emitterAddr: randomIDAddr(t, rng).Bytes(), + reverted: false, + }) + + // Setup dummy event loader to return only one event + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return []executedMessage{ + { + msg: fakeMessage(randomIDAddr(t, rng), randomIDAddr(t, rng)), + evs: []types.Event{*fakeEvent(1, []kv{{k: "test", v: []byte("value")}}, nil)}, + }, + }, nil + }) + + // Set up the next tipset for event execution + nextTs := fakeTipSet(t, rng, epoch+1, ts.Key().Cids()) + cs.SetTipsetByHeightAndKey(epoch+1, 
nextTs.Key(), nextTs) + }, + epoch: 50, + errorContains: "failed to verify indexed data at height 50: event count mismatch for height 50: chainstore has 1, index has 2", + }, + { + name: "event entries count mismatch", + setupFunc: func(_ *testing.T, si *SqliteIndexer, cs *dummyChainStore) { + epoch := abi.ChainEpoch(50) + ts := fakeTipSet(t, rng, epoch, []cid.Cid{}) + cs.SetTipsetByHeightAndKey(epoch, ts.Key(), ts) + keyBz, err := ts.Key().Cid() + require.NoError(t, err) + + // Insert one message in the index + messageID := insertTipsetMessage(t, si, tipsetMessage{ + tipsetKeyCid: keyBz.Bytes(), + height: uint64(epoch), + reverted: false, + messageCid: randomCid(t, rng).Bytes(), + messageIndex: 0, + }) + + // Insert one event with two entries for the message + eventID := insertEvent(t, si, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 4, + emitterAddr: randomIDAddr(t, rng).Bytes(), + reverted: false, + }) + insertEventEntry(t, si, eventEntry{ + eventID: eventID, + indexed: true, + flags: []byte{0x01}, + key: "key1", + codec: 1, + value: []byte("value1"), + }) + insertEventEntry(t, si, eventEntry{ + eventID: eventID, + indexed: true, + flags: []byte{0x00}, + key: "key2", + codec: 2, + value: []byte("value2"), + }) + + // Setup dummy event loader to return one event with only one entry + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return []executedMessage{ + { + msg: fakeMessage(randomIDAddr(t, rng), randomIDAddr(t, rng)), + evs: []types.Event{*fakeEvent(1, []kv{{k: "key1", v: []byte("value1")}}, nil)}, + }, + }, nil + }) + + // Set up the next tipset for event execution + nextTs := fakeTipSet(t, rng, epoch+1, ts.Key().Cids()) + cs.SetTipsetByHeightAndKey(epoch+1, nextTs.Key(), nextTs) + }, + epoch: 50, + errorContains: "failed to verify indexed data at height 50: event entries count mismatch for height 50: chainstore has 1, index has 2", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + si.Start() + + tt.setupFunc(t, si, cs) + + _, err := si.ChainValidateIndex(ctx, tt.epoch, false) + require.Error(t, err) + require.Contains(t, err.Error(), tt.errorContains) + }) + } +} diff --git a/chain/index/ddls.go b/chain/index/ddls.go new file mode 100644 index 00000000000..6a4f0d40343 --- /dev/null +++ b/chain/index/ddls.go @@ -0,0 +1,109 @@ +package index + +import "database/sql" + +const DefaultDbFilename = "chainindex.db" + +var ddls = []string{ + `CREATE TABLE IF NOT EXISTS tipset_message ( + id INTEGER PRIMARY KEY, + tipset_key_cid BLOB NOT NULL, + height INTEGER NOT NULL, + reverted INTEGER NOT NULL, + message_cid BLOB, + message_index INTEGER, + UNIQUE (tipset_key_cid, message_cid) + )`, + + `CREATE TABLE IF NOT EXISTS eth_tx_hash ( + tx_hash TEXT PRIMARY KEY, + message_cid BLOB NOT NULL, + inserted_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + )`, + + `CREATE TABLE IF NOT EXISTS event ( + id INTEGER PRIMARY KEY, + message_id INTEGER NOT NULL, + event_index INTEGER NOT NULL, + emitter_id INTEGER NOT NULL, + emitter_addr BLOB, + reverted INTEGER NOT NULL, + FOREIGN KEY (message_id) REFERENCES tipset_message(id) ON DELETE CASCADE, + UNIQUE (message_id, event_index) + )`, + + `CREATE TABLE IF NOT EXISTS event_entry ( + event_id INTEGER NOT NULL, + indexed INTEGER NOT NULL, + flags BLOB NOT NULL, + key TEXT NOT NULL, + codec INTEGER, + value BLOB NOT NULL, + FOREIGN KEY 
(event_id) REFERENCES event(id) ON DELETE CASCADE + )`, + + `CREATE INDEX IF NOT EXISTS insertion_time_index ON eth_tx_hash (inserted_at)`, + + `CREATE INDEX IF NOT EXISTS idx_message_cid ON tipset_message (message_cid)`, + + `CREATE INDEX IF NOT EXISTS idx_tipset_key_cid ON tipset_message (tipset_key_cid)`, + + `CREATE INDEX IF NOT EXISTS idx_event_message_id ON event (message_id)`, + + `CREATE INDEX IF NOT EXISTS idx_height ON tipset_message (height)`, + + `CREATE INDEX IF NOT EXISTS event_entry_event_id ON event_entry(event_id)`, +} + +// preparedStatementMapping returns a map of fields of the preparedStatements struct to the SQL +// query that should be prepared for that field. This is used to prepare all the statements in +// the preparedStatements struct. +func preparedStatementMapping(ps *preparedStatements) map[**sql.Stmt]string { + return map[**sql.Stmt]string{ + &ps.getNonRevertedMsgInfoStmt: "SELECT tipset_key_cid, height FROM tipset_message WHERE message_cid = ? AND reverted = 0 LIMIT 1", + &ps.getMsgCidFromEthHashStmt: "SELECT message_cid FROM eth_tx_hash WHERE tx_hash = ? LIMIT 1", + &ps.insertEthTxHashStmt: "INSERT INTO eth_tx_hash (tx_hash, message_cid) VALUES (?, ?) ON CONFLICT (tx_hash) DO UPDATE SET inserted_at = CURRENT_TIMESTAMP", + &ps.insertTipsetMessageStmt: "INSERT INTO tipset_message (tipset_key_cid, height, reverted, message_cid, message_index) VALUES (?, ?, ?, ?, ?) ON CONFLICT (tipset_key_cid, message_cid) DO UPDATE SET reverted = 0", + &ps.hasTipsetStmt: "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.updateTipsetToNonRevertedStmt: "UPDATE tipset_message SET reverted = 0 WHERE tipset_key_cid = ?", + &ps.updateTipsetToRevertedStmt: "UPDATE tipset_message SET reverted = 1 WHERE tipset_key_cid = ?", + &ps.removeTipsetsBeforeHeightStmt: "DELETE FROM tipset_message WHERE height < ?", + &ps.removeEthHashesOlderThanStmt: "DELETE FROM eth_tx_hash WHERE inserted_at < datetime('now', ?)", + &ps.updateTipsetsToRevertedFromHeightStmt: "UPDATE tipset_message SET reverted = 1 WHERE height >= ?", + &ps.updateEventsToRevertedFromHeightStmt: "UPDATE event SET reverted = 1 WHERE message_id IN (SELECT id FROM tipset_message WHERE height >= ?)", + &ps.isIndexEmptyStmt: "SELECT NOT EXISTS(SELECT 1 FROM tipset_message LIMIT 1)", + &ps.getMinNonRevertedHeightStmt: "SELECT MIN(height) FROM tipset_message WHERE reverted = 0", + &ps.hasNonRevertedTipsetStmt: "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0)", + &ps.updateEventsToRevertedStmt: "UPDATE event SET reverted = 1 WHERE message_id IN (SELECT id FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.updateEventsToNonRevertedStmt: "UPDATE event SET reverted = 0 WHERE message_id IN (SELECT id FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.getMsgIdForMsgCidAndTipsetStmt: "SELECT id FROM tipset_message WHERE tipset_key_cid = ? AND message_cid = ? AND reverted = 0", + &ps.insertEventStmt: "INSERT INTO event (message_id, event_index, emitter_addr, reverted) VALUES (?, ?, ?, ?) ON CONFLICT (message_id, event_index) DO UPDATE SET reverted = 0", + &ps.insertEventEntryStmt: "INSERT INTO event_entry (event_id, indexed, flags, key, codec, value) VALUES (?, ?, ?, ?, ?, ?)", + &ps.hasNullRoundAtHeightStmt: "SELECT NOT EXISTS(SELECT 1 FROM tipset_message WHERE height = ?)", + &ps.getNonRevertedTipsetAtHeightStmt: "SELECT tipset_key_cid FROM tipset_message WHERE height = ? 
AND reverted = 0 LIMIT 1", + &ps.countTipsetsAtHeightStmt: "SELECT COUNT(CASE WHEN reverted = 1 THEN 1 END) AS reverted_count, COUNT(CASE WHEN reverted = 0 THEN 1 END) AS non_reverted_count FROM (SELECT tipset_key_cid, MAX(reverted) AS reverted FROM tipset_message WHERE height = ? GROUP BY tipset_key_cid) AS unique_tipsets", + &ps.getNonRevertedTipsetMessageCountStmt: "SELECT COUNT(*) FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0 AND message_cid IS NOT NULL", + &ps.getNonRevertedTipsetEventCountStmt: "SELECT COUNT(*) FROM event WHERE reverted = 0 AND message_id IN (SELECT id FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0)", + &ps.hasRevertedEventsInTipsetStmt: "SELECT EXISTS(SELECT 1 FROM event WHERE reverted = 1 AND message_id IN (SELECT id FROM tipset_message WHERE tipset_key_cid = ?))", + &ps.getNonRevertedTipsetEventEntriesCountStmt: "SELECT COUNT(ee.event_id) AS entry_count FROM event_entry ee JOIN event e ON ee.event_id = e.id JOIN tipset_message tm ON e.message_id = tm.id WHERE tm.tipset_key_cid = ? AND tm.reverted = 0", + &ps.removeRevertedTipsetsBeforeHeightStmt: "DELETE FROM tipset_message WHERE reverted = 1 AND height < ?", + &ps.getNonRevertedMsgInfoStmt: "SELECT tipset_key_cid, height FROM tipset_message WHERE message_cid = ? AND reverted = 0 LIMIT 1", + &ps.getMsgCidFromEthHashStmt: "SELECT message_cid FROM eth_tx_hash WHERE tx_hash = ? LIMIT 1", + &ps.insertEthTxHashStmt: "INSERT INTO eth_tx_hash (tx_hash, message_cid) VALUES (?, ?) ON CONFLICT (tx_hash) DO UPDATE SET inserted_at = CURRENT_TIMESTAMP", + &ps.insertTipsetMessageStmt: "INSERT INTO tipset_message (tipset_key_cid, height, reverted, message_cid, message_index) VALUES (?, ?, ?, ?, ?) ON CONFLICT (tipset_key_cid, message_cid) DO UPDATE SET reverted = 0", + &ps.hasTipsetStmt: "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.updateTipsetToNonRevertedStmt: "UPDATE tipset_message SET reverted = 0 WHERE tipset_key_cid = ?", + &ps.updateTipsetToRevertedStmt: "UPDATE tipset_message SET reverted = 1 WHERE tipset_key_cid = ?", + &ps.removeTipsetsBeforeHeightStmt: "DELETE FROM tipset_message WHERE height < ?", + &ps.removeEthHashesOlderThanStmt: "DELETE FROM eth_tx_hash WHERE inserted_at < datetime('now', ?)", + &ps.updateTipsetsToRevertedFromHeightStmt: "UPDATE tipset_message SET reverted = 1 WHERE height >= ?", + &ps.updateEventsToRevertedFromHeightStmt: "UPDATE event SET reverted = 1 WHERE message_id IN (SELECT id FROM tipset_message WHERE height >= ?)", + &ps.getMinNonRevertedHeightStmt: "SELECT MIN(height) FROM tipset_message WHERE reverted = 0", + &ps.hasNonRevertedTipsetStmt: "SELECT EXISTS(SELECT 1 FROM tipset_message WHERE tipset_key_cid = ? AND reverted = 0)", + &ps.updateEventsToRevertedStmt: "UPDATE event SET reverted = 1 WHERE message_id IN (SELECT id FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.updateEventsToNonRevertedStmt: "UPDATE event SET reverted = 0 WHERE message_id IN (SELECT id FROM tipset_message WHERE tipset_key_cid = ?)", + &ps.getMsgIdForMsgCidAndTipsetStmt: "SELECT id FROM tipset_message WHERE tipset_key_cid = ? AND message_cid = ? 
AND reverted = 0 LIMIT 1", + &ps.insertEventStmt: "INSERT INTO event (message_id, event_index, emitter_id, emitter_addr, reverted) VALUES (?, ?, ?, ?, ?)", + &ps.insertEventEntryStmt: "INSERT INTO event_entry (event_id, indexed, flags, key, codec, value) VALUES (?, ?, ?, ?, ?, ?)", + } +} diff --git a/chain/index/ddls_test.go b/chain/index/ddls_test.go new file mode 100644 index 00000000000..e9673a440f1 --- /dev/null +++ b/chain/index/ddls_test.go @@ -0,0 +1,743 @@ +package index + +import ( + "database/sql" + "testing" + + "github.com/stretchr/testify/require" +) + +const ( + tipsetKeyCid1 = "test_tipset_key" + tipsetKeyCid2 = "test_tipset_key_2" + messageCid1 = "test_message_cid" + messageCid2 = "test_message_cid_2" + emitterAddr1 = "test_emitter_addr" +) + +func TestHasRevertedEventsInTipsetStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // running on empty DB should return false + verifyHasRevertedEventsInTipsetStmt(t, s, []byte(tipsetKeyCid1), false) + + // Insert tipset with a reverted event + ts := tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + } + messageID := insertTipsetMessage(t, s, ts) + + // this event will be un-reverted later + insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 1, + emitterAddr: []byte(emitterAddr1), + reverted: true, + }) + + // this event should not be un-reverted + ts = tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid2), + height: 1, + reverted: false, + messageCid: []byte(messageCid2), + messageIndex: 0, + } + messageID2 := insertTipsetMessage(t, s, ts) + insertEvent(t, s, event{ + messageID: messageID2, + eventIndex: 0, + emitterId: 2, + emitterAddr: []byte(emitterAddr1), + reverted: true, + }) + + // Verify `hasRevertedEventsInTipset` returns true + verifyHasRevertedEventsInTipsetStmt(t, s, []byte(tipsetKeyCid1), true) + verifyHasRevertedEventsInTipsetStmt(t, s, []byte(tipsetKeyCid2), true) + + // change event to non-reverted + updateEventsToNonReverted(t, s, []byte(tipsetKeyCid1)) + + // Verify `hasRevertedEventsInTipset` returns false + verifyHasRevertedEventsInTipsetStmt(t, s, []byte(tipsetKeyCid1), false) + verifyHasRevertedEventsInTipsetStmt(t, s, []byte(tipsetKeyCid2), true) +} + +func TestGetNonRevertedTipsetCountStmts(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // running on empty DB should return 0 + verifyNonRevertedEventEntriesCount(t, s, []byte(tipsetKeyCid1), 0) + verifyNonRevertedEventCount(t, s, []byte(tipsetKeyCid1), 0) + verifyNonRevertedMessageCount(t, s, []byte(tipsetKeyCid1), 0) + + // Insert non-reverted tipset + messageID := insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + }) + + // Insert event + eventID1 := insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 1, + emitterAddr: []byte(emitterAddr1), + reverted: false, + }) + eventID2 := insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 1, + emitterId: 2, + emitterAddr: []byte(emitterAddr1), + reverted: false, + }) + + // Insert event entry + insertEventEntry(t, s, eventEntry{ + eventID: eventID1, + indexed: true, + flags: []byte("test_flags"), + key: "test_key", + codec: 1, + value: []byte("test_value"), + }) + insertEventEntry(t, s, eventEntry{ + eventID: eventID2, + indexed: 
true, + flags: []byte("test_flags2"), + key: "test_key2", + codec: 2, + value: []byte("test_value2"), + }) + + // verify 2 event entries + verifyNonRevertedEventEntriesCount(t, s, []byte(tipsetKeyCid1), 2) + + // Verify event count + verifyNonRevertedEventCount(t, s, []byte(tipsetKeyCid1), 2) + + // verify message count is 1 + verifyNonRevertedMessageCount(t, s, []byte(tipsetKeyCid1), 1) + + // mark tipset as reverted + revertTipset(t, s, []byte(tipsetKeyCid1)) + + // Verify `getNonRevertedTipsetEventEntriesCountStmt` returns 0 + verifyNonRevertedEventEntriesCount(t, s, []byte(tipsetKeyCid1), 0) + + // verify event count is 0 + verifyNonRevertedEventCount(t, s, []byte(tipsetKeyCid1), 0) + + // verify message count is 0 + verifyNonRevertedMessageCount(t, s, []byte(tipsetKeyCid1), 0) +} + +func TestUpdateTipsetToNonRevertedStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // insert a reverted tipset + ts := tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: true, + messageCid: []byte(messageCid1), + messageIndex: 0, + } + + // Insert tipset + messageId := insertTipsetMessage(t, s, ts) + + res, err := s.stmts.updateTipsetToNonRevertedStmt.Exec([]byte(tipsetKeyCid1)) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + // verify the tipset is not reverted + ts.reverted = false + verifyTipsetMessage(t, s, messageId, ts) +} + +func TestHasNullRoundAtHeightStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // running on empty DB should return true + verifyHasNullRoundAtHeightStmt(t, s, 1, true) + verifyHasNullRoundAtHeightStmt(t, s, 0, true) + + // insert tipset + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + }) + + // verify not a null round + verifyHasNullRoundAtHeightStmt(t, s, 1, false) +} + +func TestHasTipsetStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // running on empty DB should return false + verifyHasTipsetStmt(t, s, []byte(tipsetKeyCid1), false) + + // insert tipset + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + }) + + // verify tipset exists + verifyHasTipsetStmt(t, s, []byte(tipsetKeyCid1), true) + + // verify non-existent tipset + verifyHasTipsetStmt(t, s, []byte("non_existent_tipset_key"), false) +} + +func TestUpdateEventsToRevertedStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Insert a non-reverted tipset + messageID := insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte(tipsetKeyCid1), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + }) + + // Insert non-reverted events + insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 1, + emitterAddr: []byte(emitterAddr1), + reverted: false, + }) + insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 1, + emitterId: 2, + emitterAddr: []byte(emitterAddr1), + reverted: false, + }) + + // Verify events are not reverted + var count int + err = s.db.QueryRow("SELECT COUNT(*) FROM event WHERE reverted = 0 AND message_id = ?", messageID).Scan(&count) + 
require.NoError(t, err) + require.Equal(t, 2, count) + + // Execute updateEventsToRevertedStmt + _, err = s.stmts.updateEventsToRevertedStmt.Exec([]byte(tipsetKeyCid1)) + require.NoError(t, err) + + // Verify events are now reverted + err = s.db.QueryRow("SELECT COUNT(*) FROM event WHERE reverted = 1 AND message_id = ?", messageID).Scan(&count) + require.NoError(t, err) + require.Equal(t, 2, count) + + // Verify no non-reverted events remain + err = s.db.QueryRow("SELECT COUNT(*) FROM event WHERE reverted = 0 AND message_id = ?", messageID).Scan(&count) + require.NoError(t, err) + require.Equal(t, 0, count) +} + +func TestCountTipsetsAtHeightStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Test empty DB + verifyCountTipsetsAtHeightStmt(t, s, 1, 0, 0) + + // Test 0,1 case + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_1"), + height: 1, + reverted: false, + messageCid: []byte("test_message_cid_1"), + messageIndex: 0, + }) + verifyCountTipsetsAtHeightStmt(t, s, 1, 0, 1) + + // Test 0,2 case + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_2"), + height: 1, + reverted: false, + messageCid: []byte("test_message_cid_2"), + messageIndex: 0, + }) + verifyCountTipsetsAtHeightStmt(t, s, 1, 0, 2) + + // Test 1,2 case + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_3"), + height: 1, + reverted: true, + messageCid: []byte("test_message_cid_3"), + messageIndex: 0, + }) + verifyCountTipsetsAtHeightStmt(t, s, 1, 1, 2) + + // Test 2,2 case + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_4"), + height: 1, + reverted: true, + messageCid: []byte("test_message_cid_4"), + messageIndex: 0, + }) + verifyCountTipsetsAtHeightStmt(t, s, 1, 2, 2) +} + +func TestNonRevertedTipsetAtHeightStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Test empty DB + var et []byte + err = s.stmts.getNonRevertedTipsetAtHeightStmt.QueryRow(10).Scan(&et) + require.Equal(t, sql.ErrNoRows, err) + + // Insert non-reverted tipset + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_1"), + height: 10, + reverted: false, + messageCid: []byte("test_message_cid_1"), + messageIndex: 0, + }) + + // Insert reverted tipset at same height + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_2"), + height: 10, + reverted: true, + messageCid: []byte("test_message_cid_2"), + messageIndex: 0, + }) + + // Verify getNonRevertedTipsetAtHeightStmt returns the non-reverted tipset + var tipsetKeyCid []byte + err = s.stmts.getNonRevertedTipsetAtHeightStmt.QueryRow(10).Scan(&tipsetKeyCid) + require.NoError(t, err) + require.Equal(t, []byte("test_tipset_key_1"), tipsetKeyCid) + + // Insert another non-reverted tipset at a different height + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_3"), + height: 20, + reverted: false, + messageCid: []byte("test_message_cid_3"), + messageIndex: 0, + }) + + // Verify getNonRevertedTipsetAtHeightStmt returns the correct tipset for the new height + err = s.stmts.getNonRevertedTipsetAtHeightStmt.QueryRow(20).Scan(&tipsetKeyCid) + require.NoError(t, err) + require.Equal(t, []byte("test_tipset_key_3"), tipsetKeyCid) + + // Test with a height that has no tipset + err = s.stmts.getNonRevertedTipsetAtHeightStmt.QueryRow(30).Scan(&tipsetKeyCid) + require.Equal(t, sql.ErrNoRows, 
err) + + // Revert all tipsets at height 10 + _, err = s.db.Exec("UPDATE tipset_message SET reverted = 1 WHERE height = 10") + require.NoError(t, err) + + // Verify getNonRevertedTipsetAtHeightStmt returns no rows for the reverted height + err = s.stmts.getNonRevertedTipsetAtHeightStmt.QueryRow(10).Scan(&tipsetKeyCid) + require.Equal(t, sql.ErrNoRows, err) +} + +func TestMinNonRevertedHeightStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Test empty DB + var minHeight sql.NullInt64 + err = s.stmts.getMinNonRevertedHeightStmt.QueryRow().Scan(&minHeight) + require.NoError(t, err) + require.False(t, minHeight.Valid) + + // Insert non-reverted tipsets + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_1"), + height: 10, + reverted: false, + messageCid: []byte("test_message_cid_1"), + messageIndex: 0, + }) + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_2"), + height: 20, + reverted: false, + messageCid: []byte("test_message_cid_2"), + messageIndex: 0, + }) + + // Verify minimum non-reverted height + verifyMinNonRevertedHeightStmt(t, s, 10) + + // Insert reverted tipset with lower height + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key_4"), + height: 5, + reverted: true, + messageCid: []byte("test_message_cid_4"), + messageIndex: 0, + }) + + // Verify minimum non-reverted height hasn't changed + verifyMinNonRevertedHeightStmt(t, s, 10) + + // Revert all tipsets + _, err = s.db.Exec("UPDATE tipset_message SET reverted = 1") + require.NoError(t, err) + + // Verify no minimum non-reverted height + err = s.stmts.getMinNonRevertedHeightStmt.QueryRow().Scan(&minHeight) + require.NoError(t, err) + require.False(t, minHeight.Valid) +} + +func verifyMinNonRevertedHeightStmt(t *testing.T, s *SqliteIndexer, expectedMinHeight int64) { + var minHeight sql.NullInt64 + err := s.stmts.getMinNonRevertedHeightStmt.QueryRow().Scan(&minHeight) + require.NoError(t, err) + require.True(t, minHeight.Valid) + require.Equal(t, expectedMinHeight, minHeight.Int64) +} + +func TestGetMsgIdForMsgCidAndTipsetStmt(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Insert a non-reverted tipset + tipsetKeyCid := []byte(tipsetKeyCid1) + messageCid := []byte(messageCid1) + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: tipsetKeyCid, + height: 1, + reverted: false, + messageCid: messageCid, + messageIndex: 0, + }) + + // Verify getMsgIdForMsgCidAndTipset returns the correct message ID + var messageID int64 + err = s.stmts.getMsgIdForMsgCidAndTipsetStmt.QueryRow(tipsetKeyCid, messageCid).Scan(&messageID) + require.NoError(t, err) + require.Equal(t, int64(1), messageID) + + // Test with non-existent message CID + nonExistentMessageCid := []byte("non_existent_message_cid") + err = s.stmts.getMsgIdForMsgCidAndTipsetStmt.QueryRow(tipsetKeyCid, nonExistentMessageCid).Scan(&messageID) + require.Equal(t, sql.ErrNoRows, err) + + // Test with non-existent tipset key + nonExistentTipsetKeyCid := []byte("non_existent_tipset_key") + err = s.stmts.getMsgIdForMsgCidAndTipsetStmt.QueryRow(nonExistentTipsetKeyCid, messageCid).Scan(&messageID) + require.Equal(t, sql.ErrNoRows, err) + + // Insert a reverted tipset + revertedTipsetKeyCid := []byte("reverted_tipset_key") + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: revertedTipsetKeyCid, + height: 2, + reverted: true, + messageCid: messageCid, + messageIndex: 
0, + }) + + // Verify getMsgIdForMsgCidAndTipset doesn't return the message ID for a reverted tipset + err = s.stmts.getMsgIdForMsgCidAndTipsetStmt.QueryRow(revertedTipsetKeyCid, messageCid).Scan(&messageID) + require.Equal(t, sql.ErrNoRows, err) +} + +func TestForeignKeyCascadeDelete(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + // Insert a tipset + messageID := insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key"), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + }) + + // Insert an event for the tipset + eventID := insertEvent(t, s, event{ + messageID: messageID, + eventIndex: 0, + emitterId: 2, + emitterAddr: []byte("test_emitter_addr"), + reverted: false, + }) + + // Insert an event entry for the event + insertEventEntry(t, s, eventEntry{ + eventID: eventID, + indexed: true, + flags: []byte("test_flags"), + key: "test_key", + codec: 1, + value: []byte("test_value"), + }) + + // Delete the tipset + res, err := s.db.Exec("DELETE FROM tipset_message WHERE tipset_key_cid = ?", []byte("test_tipset_key")) + require.NoError(t, err) + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + // verify event is deleted + verifyEventAbsent(t, s, eventID) + verifyEventEntryAbsent(t, s, eventID) +} + +func TestInsertTipsetMessage(t *testing.T) { + s, err := NewSqliteIndexer(":memory:", nil, 0, false, 0) + require.NoError(t, err) + + ts := tipsetMessage{ + tipsetKeyCid: []byte("test_tipset_key"), + height: 1, + reverted: false, + messageCid: []byte(messageCid1), + messageIndex: 0, + } + + // Insert a tipset + messageID := insertTipsetMessage(t, s, ts) + + // revert the tipset + revertTipset(t, s, []byte("test_tipset_key")) + ts.reverted = true + verifyTipsetMessage(t, s, messageID, ts) + + // inserting with the same (tipset, message) should overwrite the reverted flag + res, err := s.stmts.insertTipsetMessageStmt.Exec(ts.tipsetKeyCid, ts.height, true, ts.messageCid, ts.messageIndex) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + ts.reverted = false + verifyTipsetMessage(t, s, messageID, ts) +} + +type tipsetMessage struct { + tipsetKeyCid []byte + height uint64 + reverted bool + messageCid []byte + messageIndex int64 +} + +type event struct { + eventIndex uint64 + emitterId uint64 + emitterAddr []byte + reverted bool + messageID int64 +} + +type eventEntry struct { + eventID int64 + indexed bool + flags []byte + key string + codec int + value []byte +} + +func updateEventsToNonReverted(t *testing.T, s *SqliteIndexer, tsKeyCid []byte) { + res, err := s.stmts.updateEventsToNonRevertedStmt.Exec(tsKeyCid) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + // read all events for this tipset and verify they are not reverted using a COUNT query + var count int + err = s.db.QueryRow("SELECT COUNT(*) FROM event e JOIN tipset_message tm ON e.message_id = tm.id WHERE tm.tipset_key_cid = ? 
AND e.reverted = 1", tsKeyCid).Scan(&count) + require.NoError(t, err) + require.Equal(t, 0, count, "Expected no reverted events for this tipset") +} + +func revertTipset(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte) { + res, err := s.stmts.updateTipsetToRevertedStmt.Exec(tipsetKeyCid) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + var reverted bool + err = s.db.QueryRow("SELECT reverted FROM tipset_message WHERE tipset_key_cid = ?", tipsetKeyCid).Scan(&reverted) + require.NoError(t, err) + require.True(t, reverted) +} + +func verifyTipsetMessage(t *testing.T, s *SqliteIndexer, messageID int64, expectedTipsetMessage tipsetMessage) { + var tipsetKeyCid []byte + var height uint64 + var reverted bool + var messageCid []byte + var messageIndex int64 + err := s.db.QueryRow("SELECT tipset_key_cid, height, reverted, message_cid, message_index FROM tipset_message WHERE id = ?", messageID).Scan(&tipsetKeyCid, &height, &reverted, &messageCid, &messageIndex) + require.NoError(t, err) + require.Equal(t, expectedTipsetMessage.tipsetKeyCid, tipsetKeyCid) + require.Equal(t, expectedTipsetMessage.height, height) + require.Equal(t, expectedTipsetMessage.reverted, reverted) + require.Equal(t, expectedTipsetMessage.messageCid, messageCid) + require.Equal(t, expectedTipsetMessage.messageIndex, messageIndex) +} + +func verifyEventEntryAbsent(t *testing.T, s *SqliteIndexer, eventID int64) { + err := s.db.QueryRow("SELECT event_id FROM event_entry WHERE event_id = ?", eventID).Scan(&eventID) + require.Equal(t, sql.ErrNoRows, err) +} + +func verifyEventAbsent(t *testing.T, s *SqliteIndexer, eventID int64) { + var eventIndex uint64 + err := s.db.QueryRow("SELECT event_index FROM event WHERE id = ?", eventID).Scan(&eventIndex) + require.Equal(t, sql.ErrNoRows, err) +} + +func verifyEvent(t *testing.T, s *SqliteIndexer, eventID int64, expectedEvent event) { + var eventIndex uint64 + var emitterAddr []byte + var reverted bool + var messageID int64 + err := s.db.QueryRow("SELECT event_index, emitter_addr, reverted, message_id FROM event WHERE id = ?", eventID).Scan(&eventIndex, &emitterAddr, &reverted, &messageID) + require.NoError(t, err) + require.Equal(t, expectedEvent.eventIndex, eventIndex) + require.Equal(t, expectedEvent.emitterAddr, emitterAddr) + require.Equal(t, expectedEvent.reverted, reverted) + require.Equal(t, expectedEvent.messageID, messageID) +} + +func verifyCountTipsetsAtHeightStmt(t *testing.T, s *SqliteIndexer, height uint64, expectedRevertedCount, expectedNonRevertedCount int) { + var revertedCount, nonRevertedCount int + err := s.stmts.countTipsetsAtHeightStmt.QueryRow(height).Scan(&revertedCount, &nonRevertedCount) + require.NoError(t, err) + require.Equal(t, expectedRevertedCount, revertedCount) + require.Equal(t, expectedNonRevertedCount, nonRevertedCount) +} + +func verifyHasTipsetStmt(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte, expectedHas bool) { + var has bool + err := s.stmts.hasTipsetStmt.QueryRow(tipsetKeyCid).Scan(&has) + require.NoError(t, err) + require.Equal(t, expectedHas, has) +} + +func verifyHasRevertedEventsInTipsetStmt(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte, expectedHas bool) { + var hasRevertedEventsInTipset bool + err := s.stmts.hasRevertedEventsInTipsetStmt.QueryRow(tipsetKeyCid).Scan(&hasRevertedEventsInTipset) + require.NoError(t, err) + require.Equal(t, expectedHas, hasRevertedEventsInTipset) +} + +func verifyHasNullRoundAtHeightStmt(t *testing.T, 
s *SqliteIndexer, height uint64, expectedHasNullRound bool) { + var hasNullRound bool + err := s.stmts.hasNullRoundAtHeightStmt.QueryRow(height).Scan(&hasNullRound) + require.NoError(t, err) + require.Equal(t, expectedHasNullRound, hasNullRound) +} + +func verifyNonRevertedMessageCount(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte, expectedCount int) { + var count int + err := s.stmts.getNonRevertedTipsetMessageCountStmt.QueryRow(tipsetKeyCid).Scan(&count) + require.NoError(t, err) + require.Equal(t, expectedCount, count) +} + +func verifyNonRevertedEventCount(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte, expectedCount int) { + var count int + err := s.stmts.getNonRevertedTipsetEventCountStmt.QueryRow(tipsetKeyCid).Scan(&count) + require.NoError(t, err) + require.Equal(t, expectedCount, count) +} + +func verifyNonRevertedEventEntriesCount(t *testing.T, s *SqliteIndexer, tipsetKeyCid []byte, expectedCount int) { + var count int + err := s.stmts.getNonRevertedTipsetEventEntriesCountStmt.QueryRow(tipsetKeyCid).Scan(&count) + require.NoError(t, err) + require.Equal(t, expectedCount, count) +} + +func insertTipsetMessage(t *testing.T, s *SqliteIndexer, ts tipsetMessage) int64 { + res, err := s.stmts.insertTipsetMessageStmt.Exec(ts.tipsetKeyCid, ts.height, ts.reverted, ts.messageCid, ts.messageIndex) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + messageID, err := res.LastInsertId() + require.NoError(t, err) + require.NotEqual(t, int64(0), messageID) + + // read back the message to verify it was inserted correctly + verifyTipsetMessage(t, s, messageID, ts) + + return messageID +} + +func insertEvent(t *testing.T, s *SqliteIndexer, e event) int64 { + res, err := s.stmts.insertEventStmt.Exec(e.messageID, e.eventIndex, e.emitterId, e.emitterAddr, e.reverted) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) + + eventID, err := res.LastInsertId() + require.NoError(t, err) + require.NotEqual(t, int64(0), eventID) + + verifyEvent(t, s, eventID, e) + + return eventID +} + +func insertEventEntry(t *testing.T, s *SqliteIndexer, ee eventEntry) { + res, err := s.stmts.insertEventEntryStmt.Exec(ee.eventID, ee.indexed, ee.flags, ee.key, ee.codec, ee.value) + require.NoError(t, err) + + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) +} diff --git a/chain/index/events.go b/chain/index/events.go new file mode 100644 index 00000000000..0a1836f7b96 --- /dev/null +++ b/chain/index/events.go @@ -0,0 +1,602 @@ +package index + +import ( + "bytes" + "context" + "database/sql" + "errors" + "fmt" + "math" + "sort" + "strings" + + "github.com/ipfs/go-cid" + ipld "github.com/ipfs/go-ipld-format" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + amt4 "github.com/filecoin-project/go-amt-ipld/v4" + "github.com/filecoin-project/go-state-types/abi" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + + "github.com/filecoin-project/lotus/chain/types" +) + +const maxLookBackForWait = 120 // one hour of tipsets + +type executedMessage struct { + msg types.ChainMsg + rct types.MessageReceipt + // events extracted from receipt + evs []types.Event +} + +// events are indexed against their inclusion/message tipset when we get the corresponding execution tipset +func (si *SqliteIndexer) 
indexEvents(ctx context.Context, tx *sql.Tx, msgTs *types.TipSet, executionTs *types.TipSet) error {
+	if si.actorToDelegatedAddresFunc == nil {
+		return xerrors.Errorf("indexer cannot index events without an address resolver")
+	}
+	if si.executedMessagesLoaderFunc == nil {
+		return xerrors.Errorf("indexer cannot index events without an event loader")
+	}
+
+	// check if we have an event indexed for any message in the `msgTs` tipset -> if so, there's nothing to do here;
+	// this makes event inserts idempotent
+	msgTsKeyCidBytes, err := toTipsetKeyCidBytes(msgTs)
+	if err != nil {
+		return xerrors.Errorf("failed to get tipset key cid: %w", err)
+	}
+
+	// if we've already indexed events for this tipset, mark them as unreverted and return
+	res, err := tx.Stmt(si.stmts.updateEventsToNonRevertedStmt).ExecContext(ctx, msgTsKeyCidBytes)
+	if err != nil {
+		return xerrors.Errorf("failed to unrevert events for tipset: %w", err)
+	}
+	rows, err := res.RowsAffected()
+	if err != nil {
+		return xerrors.Errorf("failed to get rows affected by unreverting events for tipset: %w", err)
+	}
+	if rows > 0 {
+		log.Debugf("unreverted %d events for tipset: %s", rows, msgTs.Key())
+		return nil
+	}
+
+	if !si.cs.IsStoringEvents() {
+		return nil
+	}
+
+	ems, err := si.executedMessagesLoaderFunc(ctx, si.cs, msgTs, executionTs)
+	if err != nil {
+		return xerrors.Errorf("failed to load executed messages: %w", err)
+	}
+	eventCount := 0
+	addressLookups := make(map[abi.ActorID]address.Address)
+
+	for _, em := range ems {
+		msgCidBytes := em.msg.Cid().Bytes()
+
+		// read message id for this message cid and tipset key cid
+		var messageID int64
+		if err := tx.Stmt(si.stmts.getMsgIdForMsgCidAndTipsetStmt).QueryRowContext(ctx, msgTsKeyCidBytes, msgCidBytes).Scan(&messageID); err != nil {
+			return xerrors.Errorf("failed to get message id for message cid and tipset key cid: %w", err)
+		}
+		if messageID == 0 {
+			return xerrors.Errorf("message id not found for message cid %s and tipset key cid %s", em.msg.Cid(), msgTs.Key())
+		}
+
+		// Insert events for this message
+		for _, event := range em.evs {
+			addr, found := addressLookups[event.Emitter]
+			if !found {
+				var ok bool
+				addr, ok = si.actorToDelegatedAddresFunc(ctx, event.Emitter, executionTs)
+				if !ok {
+					// not an address we will be able to match against
+					continue
+				}
+				addressLookups[event.Emitter] = addr
+			}
+
+			var robustAddrbytes []byte
+			if addr.Protocol() == address.Delegated {
+				robustAddrbytes = addr.Bytes()
+			}
+
+			// Insert event into events table
+			eventResult, err := tx.Stmt(si.stmts.insertEventStmt).ExecContext(ctx, messageID, eventCount, uint64(event.Emitter), robustAddrbytes, 0)
+			if err != nil {
+				return xerrors.Errorf("failed to insert event: %w", err)
+			}
+
+			// Get the event_id of the inserted event
+			eventID, err := eventResult.LastInsertId()
+			if err != nil {
+				return xerrors.Errorf("failed to get last insert id for event: %w", err)
+			}
+
+			// Insert event entries
+			for _, entry := range event.Entries {
+				_, err := tx.Stmt(si.stmts.insertEventEntryStmt).ExecContext(ctx,
+					eventID,
+					isIndexedFlag(entry.Flags),
+					[]byte{entry.Flags},
+					entry.Key,
+					entry.Codec,
+					entry.Value,
+				)
+				if err != nil {
+					return xerrors.Errorf("failed to insert event entry: %w", err)
+				}
+			}
+			eventCount++
+		}
+	}
+
+	return nil
+}
+
+func loadExecutedMessages(ctx context.Context, cs ChainStore, recomputeTipSetStateFunc RecomputeTipSetStateFunc, msgTs, rctTs *types.TipSet) ([]executedMessage, error) {
+	msgs, err := cs.MessagesForTipset(ctx, msgTs)
+	if err != nil {
return nil, xerrors.Errorf("failed to get messages for tipset: %w", err) + } + + st := cs.ActorStore(ctx) + + var recomputed bool + recompute := func() error { + tskCid, err2 := rctTs.Key().Cid() + if err2 != nil { + return xerrors.Errorf("failed to compute tipset key cid: %w", err2) + } + + log.Warnf("failed to load receipts for tipset %s (height %d): %s; recomputing tipset state", tskCid.String(), rctTs.Height(), err.Error()) + if err := recomputeTipSetStateFunc(ctx, msgTs); err != nil { + return xerrors.Errorf("failed to recompute tipset state: %w", err) + } + log.Warnf("successfully recomputed tipset state and loaded events for %s (height %d)", tskCid.String(), rctTs.Height()) + return nil + } + + receiptsArr, err := blockadt.AsArray(st, rctTs.Blocks()[0].ParentMessageReceipts) + if err != nil { + if !ipld.IsNotFound(err) || recomputeTipSetStateFunc == nil { + return nil, xerrors.Errorf("failed to load message receipts: %w", err) + } + + if err := recompute(); err != nil { + return nil, err + } + recomputed = true + receiptsArr, err = blockadt.AsArray(st, rctTs.Blocks()[0].ParentMessageReceipts) + if err != nil { + return nil, xerrors.Errorf("failed to load receipts after tipset state recompute: %w", err) + } + } + + if uint64(len(msgs)) != receiptsArr.Length() { + return nil, xerrors.Errorf("mismatching message and receipt counts (%d msgs, %d rcts)", len(msgs), receiptsArr.Length()) + } + + ems := make([]executedMessage, len(msgs)) + + for i := 0; i < len(msgs); i++ { + ems[i].msg = msgs[i] + + var rct types.MessageReceipt + if found, err := receiptsArr.Get(uint64(i), &rct); err != nil { + return nil, xerrors.Errorf("failed to load receipt %d: %w", i, err) + } else if !found { + return nil, xerrors.Errorf("receipt %d not found", i) + } + ems[i].rct = rct + + // no events in the receipt + if rct.EventsRoot == nil { + continue + } + + eventsArr, err := amt4.LoadAMT(ctx, st, *rct.EventsRoot, amt4.UseTreeBitWidth(types.EventAMTBitwidth)) + if err != nil { + if !ipld.IsNotFound(err) || recomputeTipSetStateFunc == nil || recomputed { + return nil, xerrors.Errorf("failed to load events root for message %s: err: %w", ems[i].msg.Cid(), err) + } + // we may have the receipts but not the events, IsStoringEvents may have been false + if err := recompute(); err != nil { + return nil, err + } + eventsArr, err = amt4.LoadAMT(ctx, st, *rct.EventsRoot, amt4.UseTreeBitWidth(types.EventAMTBitwidth)) + if err != nil { + return nil, xerrors.Errorf("failed to load events amt for re-executed tipset for message %s: %w", ems[i].msg.Cid(), err) + } + } + + ems[i].evs = make([]types.Event, eventsArr.Len()) + var evt types.Event + err = eventsArr.ForEach(ctx, func(u uint64, deferred *cbg.Deferred) error { + if u > math.MaxInt { + return xerrors.Errorf("too many events") + } + if err := evt.UnmarshalCBOR(bytes.NewReader(deferred.Raw)); err != nil { + return err + } + + cpy := evt + ems[i].evs[int(u)] = cpy + return nil + }) + + if err != nil { + return nil, xerrors.Errorf("failed to iterate over events for message %d: %w", i, err) + } + } + + return ems, nil +} + +// checkTipsetIndexedStatus verifies if a specific tipset is indexed based on the EventFilter. 
+// It returns nil if the tipset is indexed or if the filter does not pin a single tipset,
+// ErrNotFound if the tipset is not indexed, and a wrapped error for any other failure.
+func (si *SqliteIndexer) checkTipsetIndexedStatus(ctx context.Context, f *EventFilter) error {
+	var tipsetKeyCid []byte
+	var err error
+
+	// Determine the tipset to check based on the filter
+	switch {
+	case f.TipsetCid != cid.Undef:
+		tipsetKeyCid = f.TipsetCid.Bytes()
+	case f.MinHeight >= 0 && f.MinHeight == f.MaxHeight:
+		tipsetKeyCid, err = si.getTipsetKeyCidByHeight(ctx, f.MinHeight)
+		if err != nil {
+			if err == ErrNotFound {
+				// this means that this is a null round and there exist no events for this epoch
+				return nil
+			}
+
+			return xerrors.Errorf("failed to get tipset key cid by height: %w", err)
+		}
+	default:
+		// This function distinguishes between two scenarios:
+		// 1. Missing events: The requested tipset is not present in the Index (an error condition).
+		// 2. Valid case: The tipset exists but contains no events (a normal situation).
+		// Currently, this distinction is only made for the common use case where a user requests events for a single tipset.
+		// TODO: Implement this functionality for a range of tipsets. This is expensive and not a common use case so it's deferred for now.
+		return nil
+	}
+
+	// If we couldn't determine a specific tipset, return ErrNotFound
+	if tipsetKeyCid == nil {
+		return ErrNotFound
+	}
+
+	// Check if the determined tipset is indexed
+	if exists, err := si.isTipsetIndexed(ctx, tipsetKeyCid); err != nil {
+		return xerrors.Errorf("failed to check if tipset is indexed: %w", err)
+	} else if exists {
+		return nil // Tipset is indexed
+	}
+
+	return ErrNotFound // Tipset is not indexed
+}
+
+// getTipsetKeyCidByHeight retrieves the tipset key CID for a given height.
+func (si *SqliteIndexer) getTipsetKeyCidByHeight(ctx context.Context, height abi.ChainEpoch) ([]byte, error) {
+	ts, err := si.cs.GetTipsetByHeight(ctx, height, nil, false)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to get tipset by height: %w", err)
+	}
+	if ts == nil {
+		return nil, xerrors.Errorf("tipset is nil for height: %d", height)
+	}
+
+	if ts.Height() != height {
+		// this means that this is a null round
+		return nil, ErrNotFound
+	}
+
+	return toTipsetKeyCidBytes(ts)
+}
+
+// GetEventsForFilter returns matching events for the given filter
+// Returns nil, nil if the filter has no matching events
+// Returns nil, ErrNotFound if the filter has no matching events and the tipset is not indexed
+// Returns nil, err for all other errors
+func (si *SqliteIndexer) GetEventsForFilter(ctx context.Context, f *EventFilter) ([]*CollectedEvent, error) {
+	getEventsFnc := func(stmt *sql.Stmt, values []any) ([]*CollectedEvent, error) {
+		q, err := stmt.QueryContext(ctx, values...)
+ if err != nil { + return nil, xerrors.Errorf("failed to query events: %w", err) + } + defer func() { _ = q.Close() }() + + var ces []*CollectedEvent + var currentID int64 = -1 + var ce *CollectedEvent + + for q.Next() { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + + var row struct { + id int64 + height uint64 + tipsetKeyCid []byte + emitterID uint64 + emitterAddr []byte + eventIndex int + messageCid []byte + messageIndex int + reverted bool + flags []byte + key string + codec uint64 + value []byte + } + + if err := q.Scan( + &row.id, + &row.height, + &row.tipsetKeyCid, + &row.emitterID, + &row.emitterAddr, + &row.eventIndex, + &row.messageCid, + &row.messageIndex, + &row.reverted, + &row.flags, + &row.key, + &row.codec, + &row.value, + ); err != nil { + return nil, xerrors.Errorf("read prefill row: %w", err) + } + + // The query will return all entries for all matching events, so we need to keep track + // of which event we are dealing with and create a new one each time we see a new id + if row.id != currentID { + // Unfortunately we can't easily incorporate the max results limit into the query due to the + // unpredictable number of rows caused by joins + // Break here to stop collecting rows + if f.MaxResults > 0 && len(ces) >= f.MaxResults { + break + } + + currentID = row.id + ce = &CollectedEvent{ + EventIdx: row.eventIndex, + Reverted: row.reverted, + Height: abi.ChainEpoch(row.height), + MsgIdx: row.messageIndex, + } + ces = append(ces, ce) + + if row.emitterAddr == nil { + ce.EmitterAddr, err = address.NewIDAddress(row.emitterID) + if err != nil { + return nil, xerrors.Errorf("failed to parse emitter id: %w", err) + } + } else { + ce.EmitterAddr, err = address.NewFromBytes(row.emitterAddr) + if err != nil { + return nil, xerrors.Errorf("parse emitter addr: %w", err) + } + } + + tsKeyCid, err := cid.Cast(row.tipsetKeyCid) + if err != nil { + return nil, xerrors.Errorf("parse tipsetkey cid: %w", err) + } + + ts, err := si.cs.GetTipSetByCid(ctx, tsKeyCid) + if err != nil { + return nil, xerrors.Errorf("get tipset by cid: %w", err) + } + if ts == nil { + return nil, xerrors.Errorf("failed to get tipset from cid: tipset is nil for cid: %s", tsKeyCid) + } + + ce.TipSetKey = ts.Key() + + ce.MsgCid, err = cid.Cast(row.messageCid) + if err != nil { + return nil, xerrors.Errorf("parse message cid: %w", err) + } + } + + ce.Entries = append(ce.Entries, types.EventEntry{ + Flags: row.flags[0], + Key: row.key, + Codec: row.codec, + Value: row.value, + }) + } + + if len(ces) == 0 { + return nil, nil + } + + // collected event list is in inverted order since we selected only the most recent events + // sort it into height order + sort.Slice(ces, func(i, j int) bool { return ces[i].Height < ces[j].Height }) + + return ces, nil + } + + values, query, err := makePrefillFilterQuery(f) + if err != nil { + return nil, xerrors.Errorf("failed to make prefill filter query: %w", err) + } + + stmt, err := si.db.Prepare(query) + if err != nil { + return nil, xerrors.Errorf("prepare prefill query: %w", err) + } + defer func() { _ = stmt.Close() }() + + ces, err := getEventsFnc(stmt, values) + if err != nil { + return nil, xerrors.Errorf("failed to get events: %w", err) + } + if len(ces) == 0 { + height := f.MaxHeight + if f.TipsetCid != cid.Undef { + ts, err := si.cs.GetTipSetByCid(ctx, f.TipsetCid) + if err != nil { + return nil, xerrors.Errorf("failed to get tipset by cid: %w", err) + } + if ts == nil { + return nil, xerrors.Errorf("failed to get tipset from cid: tipset is nil 
for cid: %s", f.TipsetCid) + } + height = ts.Height() + } + if height > 0 { + head := si.cs.GetHeaviestTipSet() + if head == nil { + return nil, errors.New("failed to get head: head is nil") + } + headHeight := head.Height() + maxLookBackHeight := headHeight - maxLookBackForWait + + // if the height is old enough, we'll assume the index is caught up to it and not bother + // waiting for it to be indexed + if height <= maxLookBackHeight { + return nil, si.checkTipsetIndexedStatus(ctx, f) + } + } + + // there's no matching events for the filter, wait till index has caught up to the head and then retry + if err := si.waitTillHeadIndexed(ctx); err != nil { + return nil, xerrors.Errorf("failed to wait for head to be indexed: %w", err) + } + ces, err = getEventsFnc(stmt, values) + if err != nil { + return nil, xerrors.Errorf("failed to get events: %w", err) + } + + if len(ces) == 0 { + return nil, si.checkTipsetIndexedStatus(ctx, f) + } + } + + return ces, nil +} + +func makePrefillFilterQuery(f *EventFilter) ([]any, string, error) { + clauses := []string{} + values := []any{} + joins := []string{} + + if f.TipsetCid != cid.Undef { + clauses = append(clauses, "tm.tipset_key_cid=?") + values = append(values, f.TipsetCid.Bytes()) + } else { + if f.MinHeight >= 0 && f.MinHeight == f.MaxHeight { + clauses = append(clauses, "tm.height=?") + values = append(values, f.MinHeight) + } else { + if f.MaxHeight >= 0 && f.MinHeight >= 0 { + clauses = append(clauses, "tm.height BETWEEN ? AND ?") + values = append(values, f.MinHeight, f.MaxHeight) + } else if f.MinHeight >= 0 { + clauses = append(clauses, "tm.height >= ?") + values = append(values, f.MinHeight) + } else if f.MaxHeight >= 0 { + clauses = append(clauses, "tm.height <= ?") + values = append(values, f.MaxHeight) + } else { + return nil, "", xerrors.Errorf("filter must specify either a tipset or a height range") + } + } + // unless asking for a specific tipset, we never want to see reverted historical events + clauses = append(clauses, "e.reverted=?") + values = append(values, false) + } + + if len(f.Addresses) > 0 { + idAddresses := make([]uint64, 0) + delegatedAddresses := make([][]byte, 0) + + for _, addr := range f.Addresses { + switch addr.Protocol() { + case address.ID: + id, err := address.IDFromAddress(addr) + if err != nil { + return nil, "", xerrors.Errorf("failed to get ID from address: %w", err) + } + idAddresses = append(idAddresses, id) + case address.Delegated: + delegatedAddresses = append(delegatedAddresses, addr.Bytes()) + default: + return nil, "", xerrors.Errorf("can only query events by ID or Delegated addresses; but request has address: %s", addr) + } + } + + if len(idAddresses) > 0 { + placeholders := strings.Repeat("?,", len(idAddresses)-1) + "?" + clauses = append(clauses, "e.emitter_id IN ("+placeholders+")") + for _, id := range idAddresses { + values = append(values, id) + } + } + + if len(delegatedAddresses) > 0 { + placeholders := strings.Repeat("?,", len(delegatedAddresses)-1) + "?" 
+ clauses = append(clauses, "e.emitter_addr IN ("+placeholders+")") + for _, addr := range delegatedAddresses { + values = append(values, addr) + } + } + } + + if len(f.KeysWithCodec) > 0 { + join := 0 + for key, vals := range f.KeysWithCodec { + if len(vals) > 0 { + join++ + joinAlias := fmt.Sprintf("ee%d", join) + joins = append(joins, fmt.Sprintf("event_entry %s ON e.id=%[1]s.event_id", joinAlias)) + clauses = append(clauses, fmt.Sprintf("%s.indexed=1 AND %[1]s.key=?", joinAlias)) + values = append(values, key) + subclauses := make([]string, 0, len(vals)) + for _, val := range vals { + subclauses = append(subclauses, fmt.Sprintf("(%s.value=? AND %[1]s.codec=?)", joinAlias)) + values = append(values, val.Value, val.Codec) + } + clauses = append(clauses, "("+strings.Join(subclauses, " OR ")+")") + } + } + } + + s := `SELECT + e.id, + tm.height, + tm.tipset_key_cid, + e.emitter_id, + e.emitter_addr, + e.event_index, + tm.message_cid, + tm.message_index, + e.reverted, + ee.flags, + ee.key, + ee.codec, + ee.value + FROM event e + JOIN tipset_message tm ON e.message_id = tm.id + JOIN event_entry ee ON e.id = ee.event_id` + + if len(joins) > 0 { + s = s + ", " + strings.Join(joins, ", ") + } + + if len(clauses) > 0 { + s = s + " WHERE " + strings.Join(clauses, " AND ") + } + + // retain insertion order of event_entry rows + s += " ORDER BY tm.height DESC, ee._rowid_ ASC" + return values, s, nil +} diff --git a/chain/index/events_test.go b/chain/index/events_test.go new file mode 100644 index 00000000000..5cf00e89ff0 --- /dev/null +++ b/chain/index/events_test.go @@ -0,0 +1,441 @@ +package index + +import ( + "context" + "database/sql" + "errors" + pseudo "math/rand" + "sort" + "testing" + "time" + + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/lib/must" +) + +func TestGetEventsForFilterNoEvents(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + + headHeight := abi.ChainEpoch(60) + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + + // Create a fake tipset at height 1 + fakeTipSet1 := fakeTipSet(t, rng, 1, nil) + + // Set the dummy chainstore to return this tipset for height 1 + cs.SetTipsetByHeightAndKey(1, fakeTipSet1.Key(), fakeTipSet1) // empty DB + cs.SetTipSetByCid(t, fakeTipSet1) + + // tipset is not indexed + f := &EventFilter{ + MinHeight: 1, + MaxHeight: 1, + } + ces, err := si.GetEventsForFilter(ctx, f) + require.True(t, errors.Is(err, ErrNotFound)) + require.Equal(t, 0, len(ces)) + + tsCid, err := fakeTipSet1.Key().Cid() + require.NoError(t, err) + f = &EventFilter{ + TipsetCid: tsCid, + } + + ces, err = si.GetEventsForFilter(ctx, f) + require.True(t, errors.Is(err, ErrNotFound)) + require.Equal(t, 0, len(ces)) + + // tipset is indexed but has no events + err = withTx(ctx, si.db, func(tx *sql.Tx) error { + return si.indexTipset(ctx, tx, fakeTipSet1) + }) + require.NoError(t, err) + + ces, err = si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 0, len(ces)) + + f = &EventFilter{ + TipsetCid: tsCid, + } + ces, err = si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 0, len(ces)) + + // search for a range that is absent + f = &EventFilter{ + MinHeight: 100, + MaxHeight: 200, + } + ces, err = 
si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 0, len(ces)) +} + +func TestGetEventsForFilterWithEvents(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(60) + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + + ev1 := fakeEvent( + abi.ActorID(1), + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr1")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + + ev2 := fakeEvent( + abi.ActorID(2), + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr2")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + + events := []types.Event{*ev1, *ev2} + + fm := fakeMessage(address.TestAddress, address.TestAddress) + em1 := executedMessage{ + msg: fm, + evs: events, + } + + si.SetActorToDelegatedAddresFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { + idAddr, err := address.NewIDAddress(uint64(emitter)) + if err != nil { + return address.Undef, false + } + + return idAddr, true + }) + + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return []executedMessage{em1}, nil + }) + + // Create a fake tipset at height 1 + fakeTipSet1 := fakeTipSet(t, rng, 1, nil) + fakeTipSet2 := fakeTipSet(t, rng, 2, nil) + + // Set the dummy chainstore to return this tipset for height 1 + cs.SetTipsetByHeightAndKey(1, fakeTipSet1.Key(), fakeTipSet1) // empty DB + cs.SetTipsetByHeightAndKey(2, fakeTipSet2.Key(), fakeTipSet2) // empty DB + cs.SetTipSetByCid(t, fakeTipSet1) + cs.SetTipSetByCid(t, fakeTipSet2) + + cs.SetMessagesForTipset(fakeTipSet1, []types.ChainMsg{fm}) + + // index tipset and events + require.NoError(t, si.Apply(ctx, fakeTipSet1, fakeTipSet2)) + + // fetch it based on height -> works + f := &EventFilter{ + MinHeight: 1, + MaxHeight: 1, + } + ces, err := si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 2, len(ces)) + + // fetch it based on cid -> works + tsCid1, err := fakeTipSet1.Key().Cid() + require.NoError(t, err) + + tsCid2, err := fakeTipSet2.Key().Cid() + require.NoError(t, err) + + f = &EventFilter{ + TipsetCid: tsCid1, + } + ces, err = si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + + require.Equal(t, []*CollectedEvent{ + { + Entries: ev1.Entries, + EmitterAddr: must.One(address.NewIDAddress(uint64(ev1.Emitter))), + EventIdx: 0, + Reverted: false, + Height: 1, + TipSetKey: fakeTipSet1.Key(), + MsgIdx: 0, + MsgCid: fm.Cid(), + }, + { + Entries: ev2.Entries, + EmitterAddr: must.One(address.NewIDAddress(uint64(ev2.Emitter))), + EventIdx: 1, + Reverted: false, + Height: 1, + TipSetKey: fakeTipSet1.Key(), + MsgIdx: 0, + MsgCid: fm.Cid(), + }, + }, ces) + + // mark fakeTipSet2 as reverted so events for fakeTipSet1 are reverted + require.NoError(t, si.Revert(ctx, fakeTipSet2, fakeTipSet1)) + + var reverted bool + err = si.db.QueryRow("SELECT reverted FROM tipset_message WHERE tipset_key_cid = ?", tsCid2.Bytes()).Scan(&reverted) + require.NoError(t, err) + require.True(t, reverted) + + var reverted2 bool + err = si.db.QueryRow("SELECT reverted FROM tipset_message WHERE tipset_key_cid = ?", tsCid1.Bytes()).Scan(&reverted2) + require.NoError(t, err) + require.False(t, reverted2) + + // fetching events fails if excludeReverted is true i.e. 
we request events by height + f = &EventFilter{ + MinHeight: 1, + MaxHeight: 1, + } + ces, err = si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 0, len(ces)) + + // works if excludeReverted is false i.e. we request events by hash + f = &EventFilter{ + TipsetCid: tsCid1, + } + ces, err = si.GetEventsForFilter(ctx, f) + require.NoError(t, err) + require.Equal(t, 2, len(ces)) +} + +func TestGetEventsFilterByAddress(t *testing.T) { + ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + headHeight := abi.ChainEpoch(60) + si, _, cs := setupWithHeadIndexed(t, headHeight, rng) + t.Cleanup(func() { _ = si.Close() }) + + addr1, err := address.NewIDAddress(1) + require.NoError(t, err) + addr2, err := address.NewIDAddress(2) + require.NoError(t, err) + addr3, err := address.NewIDAddress(3) + require.NoError(t, err) + + delegatedAddr1, err := address.NewFromString("f410fagkp3qx2f76maqot74jaiw3tzbxe76k76zrkl3xifk67isrnbn2sll3yua") + require.NoError(t, err) + + ev1 := fakeEvent( + abi.ActorID(1), + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr1")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + + ev2 := fakeEvent( + abi.ActorID(2), + []kv{ + {k: "type", v: []byte("approval")}, + {k: "signer", v: []byte("addr2")}, + }, + []kv{ + {k: "amount", v: []byte("2988181")}, + }, + ) + + events := []types.Event{*ev1, *ev2} + + fm := fakeMessage(address.TestAddress, address.TestAddress) + em1 := executedMessage{ + msg: fm, + evs: events, + } + + si.SetActorToDelegatedAddresFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { + if emitter == abi.ActorID(1) { + return delegatedAddr1, true + } + idAddr, err := address.NewIDAddress(uint64(emitter)) + if err != nil { + return address.Undef, false + } + return idAddr, true + }) + + si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return []executedMessage{em1}, nil + }) + + // Create a fake tipset at height 1 + fakeTipSet1 := fakeTipSet(t, rng, 1, nil) + fakeTipSet2 := fakeTipSet(t, rng, 2, nil) + + // Set the dummy chainstore to return this tipset for height 1 + cs.SetTipsetByHeightAndKey(1, fakeTipSet1.Key(), fakeTipSet1) // empty DB + cs.SetTipsetByHeightAndKey(2, fakeTipSet2.Key(), fakeTipSet2) // empty DB + cs.SetTipSetByCid(t, fakeTipSet1) + cs.SetTipSetByCid(t, fakeTipSet2) + + cs.SetMessagesForTipset(fakeTipSet1, []types.ChainMsg{fm}) + + require.NoError(t, si.Apply(ctx, fakeTipSet1, fakeTipSet2)) + + testCases := []struct { + name string + f *EventFilter + expectedCount int + expectedAddresses []address.Address + }{ + { + name: "matching single ID address (non-delegated)", + f: &EventFilter{ + Addresses: []address.Address{addr2}, + MinHeight: 1, + MaxHeight: 1, + }, + expectedCount: 1, + expectedAddresses: []address.Address{addr2}, + }, + { + name: "matching single ID address", + f: &EventFilter{ + Addresses: []address.Address{addr1}, + MinHeight: 1, + MaxHeight: 1, + }, + expectedCount: 1, + expectedAddresses: []address.Address{delegatedAddr1}, + }, + { + name: "matching single delegated address", + f: &EventFilter{ + Addresses: []address.Address{delegatedAddr1}, + MinHeight: 1, + MaxHeight: 1, + }, + expectedCount: 1, + expectedAddresses: []address.Address{delegatedAddr1}, + }, + { + name: "matching multiple addresses", + f: &EventFilter{ + Addresses: []address.Address{addr1, addr2}, + 
MinHeight: 1, + MaxHeight: 1, + }, + expectedCount: 2, + expectedAddresses: []address.Address{delegatedAddr1, addr2}, + }, + { + name: "no matching address", + f: &EventFilter{ + Addresses: []address.Address{addr3}, + MinHeight: 1, + MaxHeight: 1, + }, + expectedCount: 0, + expectedAddresses: []address.Address{}, + }, + { + name: "empty address list", + f: &EventFilter{ + Addresses: []address.Address{}, + MinHeight: 1, + MaxHeight: 1, + }, + expectedCount: 2, + expectedAddresses: []address.Address{delegatedAddr1, addr2}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ces, err := si.GetEventsForFilter(ctx, tc.f) + require.NoError(t, err) + require.Equal(t, tc.expectedCount, len(ces)) + + actualAddresses := make([]address.Address, len(ces)) + for i, ce := range ces { + actualAddresses[i] = ce.EmitterAddr + } + + sortAddresses(tc.expectedAddresses) + sortAddresses(actualAddresses) + + require.Equal(t, tc.expectedAddresses, actualAddresses) + }) + } +} + +func sortAddresses(addrs []address.Address) { + sort.Slice(addrs, func(i, j int) bool { + return addrs[i].String() < addrs[j].String() + }) +} + +func fakeMessage(to, from address.Address) *types.Message { + return &types.Message{ + To: to, + From: from, + Nonce: 197, + Method: 1, + Params: []byte("some random bytes"), + GasLimit: 126723, + GasPremium: types.NewInt(4), + GasFeeCap: types.NewInt(120), + } +} + +func fakeEvent(emitter abi.ActorID, indexed []kv, unindexed []kv) *types.Event { + ev := &types.Event{ + Emitter: emitter, + } + + for _, in := range indexed { + ev.Entries = append(ev.Entries, types.EventEntry{ + Flags: 0x01, + Key: in.k, + Codec: cid.Raw, + Value: in.v, + }) + } + + for _, in := range unindexed { + ev.Entries = append(ev.Entries, types.EventEntry{ + Flags: 0x00, + Key: in.k, + Codec: cid.Raw, + Value: in.v, + }) + } + + return ev +} + +type kv struct { + k string + v []byte +} diff --git a/chain/index/gc.go b/chain/index/gc.go new file mode 100644 index 00000000000..5a7377e7263 --- /dev/null +++ b/chain/index/gc.go @@ -0,0 +1,96 @@ +package index + +import ( + "context" + "strconv" + "time" + + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/lotus/chain/actors/builtin" +) + +var ( + log = logging.Logger("chainindex") + cleanupInterval = time.Duration(4) * time.Hour +) + +func (si *SqliteIndexer) gcLoop() { + defer si.wg.Done() + + // Initial cleanup before entering the loop + si.gc(si.ctx) + + cleanupTicker := time.NewTicker(cleanupInterval) + defer cleanupTicker.Stop() + + for si.ctx.Err() == nil { + if si.isClosed() { + return + } + + select { + case <-cleanupTicker.C: + si.gc(si.ctx) + case <-si.ctx.Done(): + return + } + } +} + +func (si *SqliteIndexer) gc(ctx context.Context) { + if si.gcRetentionEpochs <= 0 { + log.Info("gc retention epochs is not set, skipping gc") + return + } + log.Info("starting index gc") + + head := si.cs.GetHeaviestTipSet() + + removalEpoch := int64(head.Height()) - si.gcRetentionEpochs - 10 // 10 is for some grace period + if removalEpoch <= 0 { + log.Info("no tipsets to gc") + return + } + + log.Infof("gc'ing all (reverted and non-reverted) tipsets before epoch %d", removalEpoch) + + res, err := si.stmts.removeTipsetsBeforeHeightStmt.ExecContext(ctx, removalEpoch) + if err != nil { + log.Errorw("failed to remove reverted tipsets before height", "height", removalEpoch, "error", err) + return + } + + rows, err := res.RowsAffected() + if err != nil { + log.Errorw("failed to get rows affected", "error", err) + return + } + + 
+	log.Infof("gc'd %d entries before epoch %d", rows, removalEpoch)
+
+	// -------------------------------------------------------------------------------------------------
+	// Also GC eth hashes
+
+	// Convert gcRetentionEpochs to number of days
+	gcRetentionDays := si.gcRetentionEpochs / (builtin.EpochsInDay)
+	if gcRetentionDays < 1 {
+		log.Infof("skipping gc of eth hashes as retention days is less than 1")
+		return
+	}
+
+	log.Infof("gc'ing eth hashes older than %d days", gcRetentionDays)
+	res, err = si.stmts.removeEthHashesOlderThanStmt.ExecContext(ctx, "-"+strconv.Itoa(int(gcRetentionDays))+" day")
+	if err != nil {
+		log.Errorf("failed to gc eth hashes older than %d days: %s", gcRetentionDays, err)
+		return
+	}
+
+	rows, err = res.RowsAffected()
+	if err != nil {
+		log.Errorf("failed to get rows affected: %s", err)
+		return
+	}
+
+	log.Infof("gc'd %d eth hashes older than %d days", rows, gcRetentionDays)
+}
diff --git a/chain/index/gc_test.go b/chain/index/gc_test.go
new file mode 100644
index 00000000000..c08c29f636a
--- /dev/null
+++ b/chain/index/gc_test.go
@@ -0,0 +1,123 @@
+package index
+
+import (
+	"context"
+	pseudo "math/rand"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+
+	"github.com/filecoin-project/lotus/chain/types"
+)
+
+func TestGC(t *testing.T) {
+	ctx := context.Background()
+	seed := time.Now().UnixNano()
+	t.Logf("seed: %d", seed)
+	rng := pseudo.New(pseudo.NewSource(seed))
+	headHeight := abi.ChainEpoch(60)
+	si, _, cs := setupWithHeadIndexed(t, headHeight, rng)
+	t.Cleanup(func() { _ = si.Close() })
+
+	si.gcRetentionEpochs = 20
+
+	ev1 := fakeEvent(
+		abi.ActorID(1),
+		[]kv{
+			{k: "type", v: []byte("approval")},
+			{k: "signer", v: []byte("addr1")},
+		},
+		[]kv{
+			{k: "amount", v: []byte("2988181")},
+		},
+	)
+
+	ev2 := fakeEvent(
+		abi.ActorID(2),
+		[]kv{
+			{k: "type", v: []byte("approval")},
+			{k: "signer", v: []byte("addr2")},
+		},
+		[]kv{
+			{k: "amount", v: []byte("2988181")},
+		},
+	)
+
+	events := []types.Event{*ev1, *ev2}
+
+	fm := fakeMessage(address.TestAddress, address.TestAddress)
+	em1 := executedMessage{
+		msg: fm,
+		evs: events,
+	}
+
+	si.SetActorToDelegatedAddresFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) {
+		idAddr, err := address.NewIDAddress(uint64(emitter))
+		if err != nil {
+			return address.Undef, false
+		}
+
+		return idAddr, true
+	})
+
+	si.setExecutedMessagesLoaderFunc(func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) {
+		if msgTs.Height() == 1 {
+			return []executedMessage{em1}, nil
+		}
+		return nil, nil
+	})
+
+	// Create fake tipsets at heights 1, 10 and 50
+	fakeTipSet1 := fakeTipSet(t, rng, 1, nil)
+	fakeTipSet2 := fakeTipSet(t, rng, 10, nil)
+	fakeTipSet3 := fakeTipSet(t, rng, 50, nil)
+
+	// Register the fake tipsets with the dummy chainstore
+	cs.SetTipsetByHeightAndKey(1, fakeTipSet1.Key(), fakeTipSet1)   // empty DB
+	cs.SetTipsetByHeightAndKey(10, fakeTipSet2.Key(), fakeTipSet2) // empty DB
+	cs.SetTipsetByHeightAndKey(50, fakeTipSet3.Key(), fakeTipSet3) // empty DB
+	cs.SetTipSetByCid(t, fakeTipSet1)
+	cs.SetTipSetByCid(t, fakeTipSet2)
+	cs.SetTipSetByCid(t, fakeTipSet3)
+
+	cs.SetMessagesForTipset(fakeTipSet1, []types.ChainMsg{fm})
+
+	// index tipset and events
+	require.NoError(t, si.Apply(ctx, fakeTipSet1, fakeTipSet2))
+	require.NoError(t, si.Apply(ctx, fakeTipSet2, fakeTipSet3))
+
+	// getLogs works for height 1
+	filter := &EventFilter{
+		MinHeight: 1,
+		MaxHeight: 1,
+	}
+	ces, err := si.GetEventsForFilter(ctx, filter)
+	require.NoError(t, err)
+	require.Len(t, ces, 2)
+
+	si.gc(ctx)
+
+	// getLogs does not work for height 1
+	_, err = si.GetEventsForFilter(ctx, filter)
+	require.Error(t, err)
+
+	// Verify that the tipset at height 1 is removed
+	var count int
+	err = si.db.QueryRow("SELECT COUNT(*) FROM tipset_message WHERE height = 1").Scan(&count)
+	require.NoError(t, err)
+	require.Equal(t, 0, count)
+
+	// Verify that the tipset at height 10 is also removed, since it is below the removal epoch
+	err = si.db.QueryRow("SELECT COUNT(*) FROM tipset_message WHERE height = 10").Scan(&count)
+	require.NoError(t, err)
+	require.Equal(t, 0, count)
+
+	// Verify that the tipset at height 50 is not removed
+	err = si.db.QueryRow("SELECT COUNT(*) FROM tipset_message WHERE height = 50").Scan(&count)
+	require.NoError(t, err)
+	require.Equal(t, 1, count)
+}
diff --git a/chain/index/helpers.go b/chain/index/helpers.go
new file mode 100644
index 00000000000..a4db495c99e
--- /dev/null
+++ b/chain/index/helpers.go
@@ -0,0 +1,155 @@
+package index
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"os"
+	"strings"
+	"time"
+
+	ipld "github.com/ipfs/go-ipld-format"
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/lotus/chain/types"
+)
+
+const maxRetries = 3
+const retryDelay = 150 * time.Millisecond
+
+// PopulateFromSnapshot initializes and populates the chain index from a snapshot.
+//
+// This function creates a new index at the specified path and populates
+// it by using the chain state from the provided ChainStore. It starts from the heaviest
+// tipset and works backwards, indexing each tipset until it reaches the genesis
+// block or encounters a tipset for which it is unable to find messages in the chain store.
+//
+// Important Notes:
+// 1. This function assumes that the snapshot has already been imported into the ChainStore.
+// 2. Events are not populated in the index because snapshots do not contain event data,
+// and messages are not re-executed during this process. The resulting index will
+// only contain tipsets and messages.
+// 3. This function will delete any existing database at the specified path before
+// creating a new one.
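+//
+// A typical call, shown here only as an illustrative sketch (the repo path
+// layout and error wrapping are assumptions, not part of this change), once
+// the snapshot has been imported into cs:
+//
+//	dbPath := filepath.Join(repoPath, "sqlite", "chainindex.db")
+//	if err := PopulateFromSnapshot(ctx, dbPath, cs); err != nil {
+//		return xerrors.Errorf("failed to populate chain index from snapshot: %w", err)
+//	}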
+func PopulateFromSnapshot(ctx context.Context, path string, cs ChainStore) error {
+	log.Infof("populating chainindex at path %s from snapshot", path)
+	// Check if a database already exists and attempt to delete it
+	if _, err := os.Stat(path); err == nil {
+		log.Infof("deleting existing chainindex at %s", path)
+		if err = os.Remove(path); err != nil {
+			return xerrors.Errorf("failed to delete existing chainindex at %s: %w", path, err)
+		}
+	}
+
+	si, err := NewSqliteIndexer(path, cs, 0, false, 0)
+	if err != nil {
+		return xerrors.Errorf("failed to create sqlite indexer: %w", err)
+	}
+	defer func() {
+		if closeErr := si.Close(); closeErr != nil {
+			log.Errorf("failed to close sqlite indexer: %s", closeErr)
+		}
+	}()
+
+	totalIndexed := 0
+
+	err = withTx(ctx, si.db, func(tx *sql.Tx) error {
+		head := cs.GetHeaviestTipSet()
+		curTs := head
+		log.Infof("starting to populate chainindex from snapshot at head height %d", head.Height())
+
+		for curTs != nil {
+			if err := si.indexTipset(ctx, tx, curTs); err != nil {
+				if ipld.IsNotFound(err) {
+					log.Infof("stopping chainindex population at height %d as snapshot only contains data up to this height; error is: %s", curTs.Height(), err)
+					break
+				}
+
+				return xerrors.Errorf("failed to populate chainindex from snapshot at height %d: %w", curTs.Height(), err)
+			}
+			totalIndexed++
+
+			curTs, err = cs.GetTipSetFromKey(ctx, curTs.Parents())
+			if err != nil {
+				return xerrors.Errorf("failed to get parent tipset: %w", err)
+			}
+		}
+
+		return nil
+	})
+	if err != nil {
+		return xerrors.Errorf("failed to populate chainindex from snapshot: %w", err)
+	}
+
+	log.Infof("Successfully populated chainindex from snapshot with %d tipsets", totalIndexed)
+	return nil
+}
+
+func toTipsetKeyCidBytes(ts *types.TipSet) ([]byte, error) {
+	if ts == nil {
+		return nil, errors.New("failed to get tipset key cid: tipset is nil")
+	}
+	tsKeyCid, err := ts.Key().Cid()
+	if err != nil {
+		return nil, err
+	}
+	return tsKeyCid.Bytes(), nil
+}
+
+func withTx(ctx context.Context, db *sql.DB, fn func(*sql.Tx) error) error {
+	var err error
+	for i := 0; i < maxRetries; i++ {
+		if ctx.Err() != nil {
+			return ctx.Err()
+		}
+		var tx *sql.Tx
+		tx, err = db.BeginTx(ctx, nil)
+		if err != nil {
+			return xerrors.Errorf("failed to begin transaction: %w", err)
+		}
+
+		defer func() {
+			if p := recover(); p != nil {
+				// A panic occurred, rollback and repanic
+				if tx != nil {
+					_ = tx.Rollback()
+				}
+				panic(p)
+			}
+		}()
+
+		err = fn(tx)
+		if err == nil {
+			if commitErr := tx.Commit(); commitErr != nil {
+				return xerrors.Errorf("failed to commit transaction: %w", commitErr)
+			}
+			return nil
+		}
+
+		_ = tx.Rollback()
+
+		if !isRetryableError(err) {
+			return xerrors.Errorf("transaction failed: %w", err)
+		}
+
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-time.After(retryDelay):
+			// Retry after delay
+		}
+	}
+
+	return xerrors.Errorf("transaction failed after %d retries; last error: %w", maxRetries, err)
+}
+
+func isRetryableError(err error) bool {
+	return err != nil && strings.Contains(err.Error(), "database is locked")
+}
+
+func isIndexedFlag(b uint8) bool {
+	// currently we mark the full entry as indexed if either the key
+	// or the value is indexed; in the future we will need finer-grained
+	// management of indices
+	return b&(types.EventFlagIndexedKey|types.EventFlagIndexedValue) > 0
+}
diff --git a/chain/index/indexer.go b/chain/index/indexer.go
new file mode 100644
index 00000000000..5d5e6fbca96
--- /dev/null
+++ b/chain/index/indexer.go
@@ -0,0 +1,416 @@
+package index
+
+import (
+	"context"
+	"database/sql"
+	"sync"
+
+	"github.com/ipfs/go-cid"
+	"golang.org/x/xerrors"
+
+	"github.com/filecoin-project/go-address"
+	"github.com/filecoin-project/go-state-types/abi"
+	"github.com/filecoin-project/go-state-types/crypto"
+
+	"github.com/filecoin-project/lotus/chain/actors/builtin"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/chain/types/ethtypes"
+	"github.com/filecoin-project/lotus/lib/sqlite"
+)
+
+var _ Indexer = (*SqliteIndexer)(nil)
+
+// ActorToDelegatedAddressFunc is a function type that resolves an actor ID to a delegated address
+// if one exists for that actor; the boolean return reports whether an address was resolved
+type ActorToDelegatedAddressFunc func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool)
+type emsLoaderFunc func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error)
+type RecomputeTipSetStateFunc func(ctx context.Context, ts *types.TipSet) error
+
+type preparedStatements struct {
+	insertEthTxHashStmt                   *sql.Stmt
+	getNonRevertedMsgInfoStmt             *sql.Stmt
+	getMsgCidFromEthHashStmt              *sql.Stmt
+	insertTipsetMessageStmt               *sql.Stmt
+	updateTipsetToRevertedStmt            *sql.Stmt
+	hasTipsetStmt                         *sql.Stmt
+	updateTipsetToNonRevertedStmt         *sql.Stmt
+	removeTipsetsBeforeHeightStmt         *sql.Stmt
+	removeEthHashesOlderThanStmt          *sql.Stmt
+	updateTipsetsToRevertedFromHeightStmt *sql.Stmt
+	updateEventsToRevertedFromHeightStmt  *sql.Stmt
+	isIndexEmptyStmt                      *sql.Stmt
+	getMinNonRevertedHeightStmt           *sql.Stmt
+	hasNonRevertedTipsetStmt              *sql.Stmt
+	updateEventsToRevertedStmt            *sql.Stmt
+	updateEventsToNonRevertedStmt         *sql.Stmt
+	getMsgIdForMsgCidAndTipsetStmt        *sql.Stmt
+	insertEventStmt                       *sql.Stmt
+	insertEventEntryStmt                  *sql.Stmt
+
+	hasNullRoundAtHeightStmt         *sql.Stmt
+	getNonRevertedTipsetAtHeightStmt *sql.Stmt
+	countTipsetsAtHeightStmt         *sql.Stmt
+
+	getNonRevertedTipsetMessageCountStmt      *sql.Stmt
+	getNonRevertedTipsetEventCountStmt        *sql.Stmt
+	getNonRevertedTipsetEventEntriesCountStmt *sql.Stmt
+	hasRevertedEventsInTipsetStmt             *sql.Stmt
+	removeRevertedTipsetsBeforeHeightStmt     *sql.Stmt
+}
+
+type SqliteIndexer struct {
+	ctx    context.Context
+	cancel context.CancelFunc
+	wg     sync.WaitGroup
+
+	db *sql.DB
+	cs ChainStore
+
+	actorToDelegatedAddresFunc ActorToDelegatedAddressFunc
+	executedMessagesLoaderFunc emsLoaderFunc
+
+	stmts *preparedStatements
+
+	gcRetentionEpochs   int64
+	reconcileEmptyIndex bool
+	maxReconcileTipsets uint64
+
+	mu           sync.Mutex
+	updateSubs   map[uint64]*updateSub
+	subIdCounter uint64
+
+	started bool
+
+	closeLk sync.RWMutex
+	closed  bool
+
+	// ensures writes are serialized so backfilling does not race with index updates
+	writerLk sync.Mutex
+}
+
+func NewSqliteIndexer(path string, cs ChainStore, gcRetentionEpochs int64, reconcileEmptyIndex bool,
+	maxReconcileTipsets uint64) (si *SqliteIndexer, err error) {
+
+	if gcRetentionEpochs != 0 && gcRetentionEpochs < builtin.EpochsInDay {
+		return nil, xerrors.Errorf("gc retention epochs must be 0 or at least %d", builtin.EpochsInDay)
+	}
+
+	db, err := sqlite.Open(path)
+	if err != nil {
+		return nil, xerrors.Errorf("failed to setup chain index db: %w", err)
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	defer func() {
+		if err != nil {
+			_ = db.Close()
+			cancel()
+		}
+	}()
+
+	err = sqlite.InitDb(ctx, "chain index", db, ddls, []sqlite.MigrationFunc{})
+	if err != nil {
+		return nil, xerrors.Errorf("failed to init chain index db: %w", err)
+	}
+
+	si = &SqliteIndexer{
+		ctx:    ctx,
+		cancel: cancel,
+
db: db, + cs: cs, + updateSubs: make(map[uint64]*updateSub), + subIdCounter: 0, + gcRetentionEpochs: gcRetentionEpochs, + reconcileEmptyIndex: reconcileEmptyIndex, + maxReconcileTipsets: maxReconcileTipsets, + stmts: &preparedStatements{}, + } + + if err = si.initStatements(); err != nil { + return nil, xerrors.Errorf("failed to prepare statements: %w", err) + } + + return si, nil +} + +func (si *SqliteIndexer) Start() { + si.wg.Add(1) + go si.gcLoop() + + si.started = true +} + +func (si *SqliteIndexer) SetActorToDelegatedAddresFunc(actorToDelegatedAddresFunc ActorToDelegatedAddressFunc) { + si.actorToDelegatedAddresFunc = actorToDelegatedAddresFunc +} + +func (si *SqliteIndexer) SetRecomputeTipSetStateFunc(f RecomputeTipSetStateFunc) { + si.buildExecutedMessagesLoader(f) +} + +func (si *SqliteIndexer) buildExecutedMessagesLoader(rf RecomputeTipSetStateFunc) { + si.executedMessagesLoaderFunc = func(ctx context.Context, cs ChainStore, msgTs, rctTs *types.TipSet) ([]executedMessage, error) { + return loadExecutedMessages(ctx, cs, rf, msgTs, rctTs) + } +} + +func (si *SqliteIndexer) Close() error { + si.closeLk.Lock() + defer si.closeLk.Unlock() + if si.closed { + return nil + } + si.closed = true + + if si.db == nil { + return nil + } + si.cancel() + si.wg.Wait() + + if err := si.db.Close(); err != nil { + return xerrors.Errorf("failed to close db: %w", err) + } + return nil +} + +func (si *SqliteIndexer) initStatements() error { + stmtMapping := preparedStatementMapping(si.stmts) + for stmtPointer, query := range stmtMapping { + var err error + *stmtPointer, err = si.db.Prepare(query) + if err != nil { + return xerrors.Errorf("prepare statement [%s]: %w", query, err) + } + } + + return nil +} + +func (si *SqliteIndexer) IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, msgCid cid.Cid) error { + if si.isClosed() { + return ErrClosed + } + + return withTx(ctx, si.db, func(tx *sql.Tx) error { + return si.indexEthTxHash(ctx, tx, txHash, msgCid) + }) +} + +func (si *SqliteIndexer) indexEthTxHash(ctx context.Context, tx *sql.Tx, txHash ethtypes.EthHash, msgCid cid.Cid) error { + insertEthTxHashStmt := tx.Stmt(si.stmts.insertEthTxHashStmt) + _, err := insertEthTxHashStmt.ExecContext(ctx, txHash.String(), msgCid.Bytes()) + if err != nil { + return xerrors.Errorf("failed to index eth tx hash: %w", err) + } + + return nil +} + +func (si *SqliteIndexer) IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error { + if msg.Signature.Type != crypto.SigTypeDelegated { + return nil + } + + if si.isClosed() { + return ErrClosed + } + + return withTx(ctx, si.db, func(tx *sql.Tx) error { + return si.indexSignedMessage(ctx, tx, msg) + }) +} + +func (si *SqliteIndexer) indexSignedMessage(ctx context.Context, tx *sql.Tx, msg *types.SignedMessage) error { + ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(msg) + if err != nil { + return xerrors.Errorf("failed to convert filecoin message to eth tx: %w", err) + } + + txHash, err := ethTx.TxHash() + if err != nil { + return xerrors.Errorf("failed to hash transaction: %w", err) + } + + return si.indexEthTxHash(ctx, tx, txHash, msg.Cid()) +} + +func (si *SqliteIndexer) Apply(ctx context.Context, from, to *types.TipSet) error { + if si.isClosed() { + return ErrClosed + } + + si.writerLk.Lock() + + // We're moving the chain ahead from the `from` tipset to the `to` tipset + // Height(to) > Height(from) + err := withTx(ctx, si.db, func(tx *sql.Tx) error { + if err := si.indexTipsetWithParentEvents(ctx, tx, from, to); err != nil { + 
return xerrors.Errorf("failed to index tipset: %w", err) + } + + return nil + }) + + if err != nil { + si.writerLk.Unlock() + return xerrors.Errorf("failed to apply tipset: %w", err) + } + si.writerLk.Unlock() + + si.notifyUpdateSubs() + + return nil +} + +func (si *SqliteIndexer) indexTipset(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { + tsKeyCidBytes, err := toTipsetKeyCidBytes(ts) + if err != nil { + return xerrors.Errorf("failed to compute tipset cid: %w", err) + } + + if restored, err := si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes); err != nil { + return xerrors.Errorf("failed to restore tipset: %w", err) + } else if restored { + return nil + } + + height := ts.Height() + insertTipsetMsgStmt := tx.Stmt(si.stmts.insertTipsetMessageStmt) + + msgs, err := si.cs.MessagesForTipset(ctx, ts) + if err != nil { + return xerrors.Errorf("failed to get messages for tipset: %w", err) + } + + if len(msgs) == 0 { + // If there are no messages, just insert the tipset and return + if _, err := insertTipsetMsgStmt.ExecContext(ctx, tsKeyCidBytes, height, 0, nil, -1); err != nil { + return xerrors.Errorf("failed to insert empty tipset: %w", err) + } + return nil + } + + for i, msg := range msgs { + msg := msg + if _, err := insertTipsetMsgStmt.ExecContext(ctx, tsKeyCidBytes, height, 0, msg.Cid().Bytes(), i); err != nil { + return xerrors.Errorf("failed to insert tipset message: %w", err) + } + } + + for _, blk := range ts.Blocks() { + blk := blk + _, smsgs, err := si.cs.MessagesForBlock(ctx, blk) + if err != nil { + return xerrors.Errorf("failed to get messages for block: %w", err) + } + + for _, smsg := range smsgs { + smsg := smsg + if smsg.Signature.Type != crypto.SigTypeDelegated { + continue + } + if err := si.indexSignedMessage(ctx, tx, smsg); err != nil { + return xerrors.Errorf("failed to index eth tx hash: %w", err) + } + } + } + + return nil +} + +func (si *SqliteIndexer) indexTipsetWithParentEvents(ctx context.Context, tx *sql.Tx, parentTs *types.TipSet, currentTs *types.TipSet) error { + // Index the parent tipset if it doesn't exist yet. + // This is necessary to properly index events produced by executing + // messages included in the parent tipset by the current tipset (deferred execution). + if err := si.indexTipset(ctx, tx, parentTs); err != nil { + return xerrors.Errorf("failed to index parent tipset: %w", err) + } + if err := si.indexTipset(ctx, tx, currentTs); err != nil { + return xerrors.Errorf("failed to index tipset: %w", err) + } + + // Now Index events + if err := si.indexEvents(ctx, tx, parentTs, currentTs); err != nil { + return xerrors.Errorf("failed to index events: %w", err) + } + + return nil +} + +func (si *SqliteIndexer) restoreTipsetIfExists(ctx context.Context, tx *sql.Tx, tsKeyCidBytes []byte) (bool, error) { + // Check if the tipset already exists + var exists bool + if err := tx.Stmt(si.stmts.hasTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { + return false, xerrors.Errorf("failed to check if tipset exists: %w", err) + } + if exists { + if _, err := tx.Stmt(si.stmts.updateTipsetToNonRevertedStmt).ExecContext(ctx, tsKeyCidBytes); err != nil { + return false, xerrors.Errorf("failed to restore tipset: %w", err) + } + return true, nil + } + return false, nil +} + +func (si *SqliteIndexer) Revert(ctx context.Context, from, to *types.TipSet) error { + if si.isClosed() { + return ErrClosed + } + + // We're reverting the chain from the tipset at `from` to the tipset at `to`. 
+	// Height(to) < Height(from)
+
+	revertTsKeyCid, err := toTipsetKeyCidBytes(from)
+	if err != nil {
+		return xerrors.Errorf("failed to get tipset key cid: %w", err)
+	}
+
+	// Because of deferred execution in Filecoin, events at tipset T are reverted when tipset T+1 is reverted.
+	// However, the tipset `T` itself is not reverted.
+	eventTsKeyCid, err := toTipsetKeyCidBytes(to)
+	if err != nil {
+		return xerrors.Errorf("failed to get tipset key cid: %w", err)
+	}
+
+	si.writerLk.Lock()
+
+	err = withTx(ctx, si.db, func(tx *sql.Tx) error {
+		// revert the `from` tipset
+		if _, err := tx.Stmt(si.stmts.updateTipsetToRevertedStmt).ExecContext(ctx, revertTsKeyCid); err != nil {
+			return xerrors.Errorf("failed to mark tipset %s as reverted: %w", revertTsKeyCid, err)
+		}
+
+		// index the `to` tipset -> it is idempotent
+		if err := si.indexTipset(ctx, tx, to); err != nil {
+			return xerrors.Errorf("failed to index tipset: %w", err)
+		}
+
+		// events are indexed against the message inclusion tipset, not the message execution tipset.
+		// So we need to revert the events for the message inclusion tipset i.e. the `to` tipset.
+		if _, err := tx.Stmt(si.stmts.updateEventsToRevertedStmt).ExecContext(ctx, eventTsKeyCid); err != nil {
+			return xerrors.Errorf("failed to revert events for tipset %s: %w", eventTsKeyCid, err)
+		}
+
+		return nil
+	})
+	if err != nil {
+		si.writerLk.Unlock()
+		return xerrors.Errorf("failed during revert transaction: %w", err)
+	}
+
+	si.writerLk.Unlock()
+	si.notifyUpdateSubs()
+
+	return nil
+}
+
+func (si *SqliteIndexer) isClosed() bool {
+	si.closeLk.RLock()
+	defer si.closeLk.RUnlock()
+	return si.closed
+}
+
+func (si *SqliteIndexer) setExecutedMessagesLoaderFunc(f emsLoaderFunc) {
+	si.executedMessagesLoaderFunc = f
+}
diff --git a/chain/index/indexer_test.go b/chain/index/indexer_test.go
new file mode 100644
index 00000000000..bc4a7a70c4f
--- /dev/null
+++ b/chain/index/indexer_test.go
@@ -0,0 +1,56 @@
+package index
+
+import (
+	"context"
+	"database/sql"
+	pseudo "math/rand"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestRestoreTipsetIfExists(t *testing.T) {
+	ctx := context.Background()
+	seed := time.Now().UnixNano()
+	t.Logf("seed: %d", seed)
+	rng := pseudo.New(pseudo.NewSource(seed))
+	si, _, _ := setupWithHeadIndexed(t, 10, rng)
+
+	tsKeyCid := randomCid(t, rng)
+	tsKeyCidBytes := tsKeyCid.Bytes()
+
+	err := withTx(ctx, si.db, func(tx *sql.Tx) error {
+		// tipset does not exist
+		exists, err := si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes)
+		require.NoError(t, err)
+		require.False(t, exists)
+
+		// insert reverted tipset
+		_, err = tx.Stmt(si.stmts.insertTipsetMessageStmt).Exec(tsKeyCidBytes, 1, 1, randomCid(t, rng).Bytes(), 0)
+		require.NoError(t, err)
+
+		// tipset now exists, and restoring it marks it as non-reverted
+		exists, err = si.restoreTipsetIfExists(ctx, tx, tsKeyCidBytes)
+		require.NoError(t, err)
+		require.True(t, exists)
+
+		// Verify that the tipset is not reverted
+		var reverted bool
+		err = tx.QueryRow("SELECT reverted FROM tipset_message WHERE tipset_key_cid = ?", tsKeyCidBytes).Scan(&reverted)
+		require.NoError(t, err)
+		require.False(t, reverted, "Tipset should not be reverted")
+
+		return nil
+	})
+	require.NoError(t, err)
+
+	exists, err := si.isTipsetIndexed(ctx, tsKeyCidBytes)
+	require.NoError(t, err)
+	require.True(t, exists)
+
+	fc := randomCid(t, rng)
+	exists, err = si.isTipsetIndexed(ctx, fc.Bytes())
+	require.NoError(t, err)
+	require.False(t, exists)
+}
diff --git a/chain/index/interface.go b/chain/index/interface.go
index f875a94bf79..e312648e6cf 100644
--- a/chain/index/interface.go
+++ b/chain/index/interface.go
@@ -6,10 +6,16 @@ import (
 
 	"github.com/ipfs/go-cid"
 
+	"github.com/filecoin-project/go-address"
 	"github.com/filecoin-project/go-state-types/abi"
+
+	"github.com/filecoin-project/lotus/chain/actors/adt"
+	"github.com/filecoin-project/lotus/chain/store"
+	"github.com/filecoin-project/lotus/chain/types"
+	"github.com/filecoin-project/lotus/chain/types/ethtypes"
 )
 
-var ErrNotFound = errors.New("message not found")
+var ErrNotFound = errors.New("not found in index")
 var ErrClosed = errors.New("index closed")
 
 // MsgInfo is the Message metadata the index tracks.
@@ -22,24 +28,60 @@ type MsgInfo struct {
 	Epoch abi.ChainEpoch
 }
 
-// MsgIndex is the interface to the message index
-type MsgIndex interface {
-	// GetMsgInfo retrieves the message metadata through the index.
-	// The lookup is done using the onchain message Cid; that is the signed message Cid
-	// for SECP messages and unsigned message Cid for BLS messages.
-	GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error)
-	// Close closes the index
-	Close() error
+type CollectedEvent struct {
+	Entries     []types.EventEntry
+	EmitterAddr address.Address // address of emitter
+	EventIdx    int             // index of the event within the list of emitted events in a given tipset
+	Reverted    bool
+	Height      abi.ChainEpoch
+	TipSetKey   types.TipSetKey // tipset that contained the message
+	MsgIdx      int             // index of the message in the tipset
+	MsgCid      cid.Cid         // cid of message that produced event
+}
+
+type EventFilter struct {
+	MinHeight abi.ChainEpoch // minimum epoch to apply filter or -1 if no minimum
+	MaxHeight abi.ChainEpoch // maximum epoch to apply filter or -1 if no maximum
+	TipsetCid cid.Cid
+	Addresses []address.Address // list of actor addresses that are expected to emit the event
+
+	KeysWithCodec map[string][]types.ActorEventBlock // map of key names to a list of alternate values that may match
+	MaxResults    int                                // maximum number of results to collect, 0 is unlimited
 }
 
-type dummyMsgIndex struct{}
+type Indexer interface {
+	Start()
+	ReconcileWithChain(ctx context.Context, currHead *types.TipSet) error
+	IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error
+	IndexEthTxHash(ctx context.Context, txHash ethtypes.EthHash, c cid.Cid) error
+
+	SetActorToDelegatedAddresFunc(idToRobustAddrFunc ActorToDelegatedAddressFunc)
+	SetRecomputeTipSetStateFunc(f RecomputeTipSetStateFunc)
 
-func (dummyMsgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) {
-	return MsgInfo{}, ErrNotFound
+	Apply(ctx context.Context, from, to *types.TipSet) error
+	Revert(ctx context.Context, from, to *types.TipSet) error
+
+	// Returns (cid.Undef, ErrNotFound) if the message was not found
+	GetCidFromHash(ctx context.Context, hash ethtypes.EthHash) (cid.Cid, error)
+	// Returns (nil, ErrNotFound) if the message was not found
+	GetMsgInfo(ctx context.Context, m cid.Cid) (*MsgInfo, error)
+
+	GetEventsForFilter(ctx context.Context, f *EventFilter) ([]*CollectedEvent, error)
+
+	ChainValidateIndex(ctx context.Context, epoch abi.ChainEpoch, backfill bool) (*types.IndexValidation, error)
+
+	Close() error
 }
 
-func (dummyMsgIndex) Close() error {
-	return nil
+type ChainStore interface {
+	MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error)
+	GetHeaviestTipSet() *types.TipSet
+	GetTipSetByCid(ctx context.Context, tsKeyCid cid.Cid) (*types.TipSet, error)
+	GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error)
+
MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) + ActorStore(ctx context.Context) adt.Store + GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) + IsStoringEvents() bool } -var DummyMsgIndex MsgIndex = dummyMsgIndex{} +var _ ChainStore = (*store.ChainStore)(nil) diff --git a/chain/index/msgindex.go b/chain/index/msgindex.go deleted file mode 100644 index f5248f2782e..00000000000 --- a/chain/index/msgindex.go +++ /dev/null @@ -1,499 +0,0 @@ -package index - -import ( - "context" - "database/sql" - "os" - "sync" - "time" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - _ "github.com/mattn/go-sqlite3" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sqlite" -) - -const DefaultDbFilename = "msgindex.db" - -var log = logging.Logger("msgindex") - -var ddls = []string{ - `CREATE TABLE IF NOT EXISTS messages ( - cid VARCHAR(80) PRIMARY KEY ON CONFLICT REPLACE, - tipset_cid VARCHAR(80) NOT NULL, - epoch INTEGER NOT NULL - )`, - `CREATE INDEX IF NOT EXISTS tipset_cids ON messages (tipset_cid)`, -} - -const ( - // prepared stmts - dbqGetMessageInfo = "SELECT tipset_cid, epoch FROM messages WHERE cid = ?" - dbqInsertMessage = "INSERT INTO messages VALUES (?, ?, ?)" - dbqDeleteTipsetMessages = "DELETE FROM messages WHERE tipset_cid = ?" - // reconciliation - dbqCountMessages = "SELECT COUNT(*) FROM messages" - dbqMinEpoch = "SELECT MIN(epoch) FROM messages" - dbqCountTipsetMessages = "SELECT COUNT(*) FROM messages WHERE tipset_cid = ?" - dbqDeleteMessagesByEpoch = "DELETE FROM messages WHERE epoch >= ?" -) - -// coalescer configuration (TODO: use observer instead) -// these are exposed to make tests snappy -var ( - CoalesceMinDelay = time.Second - CoalesceMaxDelay = 15 * time.Second - CoalesceMergeInterval = time.Second -) - -// ChainStore interface; we could use store.ChainStore directly, -// but this simplifies unit testing. 
-type ChainStore interface { - SubscribeHeadChanges(f store.ReorgNotifee) - MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) - GetHeaviestTipSet() *types.TipSet - GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) -} - -var _ ChainStore = (*store.ChainStore)(nil) - -type msgIndex struct { - cs ChainStore - - db *sql.DB - selectMsgStmt *sql.Stmt - insertMsgStmt *sql.Stmt - deleteTipSetStmt *sql.Stmt - - sema chan struct{} - mx sync.Mutex - pend []headChange - - cancel func() - workers sync.WaitGroup - closeLk sync.RWMutex - closed bool -} - -var _ MsgIndex = (*msgIndex)(nil) - -type headChange struct { - rev []*types.TipSet - app []*types.TipSet -} - -func NewMsgIndex(lctx context.Context, path string, cs ChainStore) (MsgIndex, error) { - db, exists, err := sqlite.Open(path) - if err != nil { - return nil, xerrors.Errorf("failed to setup message index db: %w", err) - } - - if err = sqlite.InitDb(lctx, "message index", db, ddls, []sqlite.MigrationFunc{}); err != nil { - _ = db.Close() - return nil, xerrors.Errorf("failed to init message index db: %w", err) - } - - // TODO we may consider populating the index when first creating the db - if exists { - if err := reconcileIndex(db, cs); err != nil { - return nil, xerrors.Errorf("error reconciling msgindex database: %w", err) - } - } - - ctx, cancel := context.WithCancel(lctx) - - msgIndex := &msgIndex{ - db: db, - cs: cs, - sema: make(chan struct{}, 1), - cancel: cancel, - } - - err = msgIndex.prepareStatements() - if err != nil { - if err := db.Close(); err != nil { - log.Errorf("error closing msgindex database: %s", err) - } - - return nil, xerrors.Errorf("error preparing msgindex database statements: %w", err) - } - - rnf := store.WrapHeadChangeCoalescer( - msgIndex.onHeadChange, - CoalesceMinDelay, - CoalesceMaxDelay, - CoalesceMergeInterval, - ) - cs.SubscribeHeadChanges(rnf) - - msgIndex.workers.Add(1) - go msgIndex.background(ctx) - - return msgIndex, nil -} - -func PopulateAfterSnapshot(lctx context.Context, path string, cs ChainStore) error { - // if a database already exists, we try to delete it and create a new one - if _, err := os.Stat(path); err == nil { - if err = os.Remove(path); err != nil { - return xerrors.Errorf("msgindex already exists at %s and can't be deleted", path) - } - } - - db, _, err := sqlite.Open(path) - if err != nil { - return xerrors.Errorf("failed to setup message index db: %w", err) - } - defer func() { - if err := db.Close(); err != nil { - log.Errorf("error closing msgindex database: %s", err) - } - }() - - if err := sqlite.InitDb(lctx, "message index", db, ddls, []sqlite.MigrationFunc{}); err != nil { - _ = db.Close() - return xerrors.Errorf("error creating msgindex database: %w", err) - } - - tx, err := db.Begin() - if err != nil { - return xerrors.Errorf("error when starting transaction: %w", err) - } - - rollback := func() { - if err := tx.Rollback(); err != nil { - log.Errorf("error in rollback: %s", err) - } - } - - insertStmt, err := tx.Prepare(dbqInsertMessage) - if err != nil { - rollback() - return xerrors.Errorf("error preparing insertStmt: %w", err) - } - - curTs := cs.GetHeaviestTipSet() - startHeight := curTs.Height() - for curTs != nil { - tscid, err := curTs.Key().Cid() - if err != nil { - rollback() - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - tskey := tscid.String() - epoch := int64(curTs.Height()) - - msgs, err := cs.MessagesForTipset(lctx, curTs) - if err != nil { - log.Infof("stopping import 
after %d tipsets", startHeight-curTs.Height()) - break - } - - for _, msg := range msgs { - key := msg.Cid().String() - if _, err := insertStmt.Exec(key, tskey, epoch); err != nil { - rollback() - return xerrors.Errorf("error inserting message: %w", err) - } - } - - curTs, err = cs.GetTipSetFromKey(lctx, curTs.Parents()) - if err != nil { - rollback() - return xerrors.Errorf("error walking chain: %w", err) - } - } - - err = tx.Commit() - if err != nil { - return xerrors.Errorf("error committing transaction: %w", err) - } - - return nil -} - -func reconcileIndex(db *sql.DB, cs ChainStore) error { - // Invariant: after reconciliation, every tipset in the index is in the current chain; ie either - // the chain head or reachable by walking the chain. - // Algorithm: - // 1. Count messages in index; if none, trivially reconciled. - // TODO we may consider populating the index in that case - // 2. Find the minimum tipset in the index; this will mark the end of the reconciliation walk - // 3. Walk from current tipset until we find a tipset in the index. - // 4. Delete (revert!) all tipsets above the found tipset. - // 5. If the walk ends in the boundary epoch, then delete everything. - // - - row := db.QueryRow(dbqCountMessages) - - var result int64 - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("error counting messages: %w", err) - } - - if result == 0 { - return nil - } - - row = db.QueryRow(dbqMinEpoch) - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("error finding boundary epoch: %w", err) - } - - boundaryEpoch := abi.ChainEpoch(result) - - countMsgsStmt, err := db.Prepare(dbqCountTipsetMessages) - if err != nil { - return xerrors.Errorf("error preparing statement: %w", err) - } - - curTs := cs.GetHeaviestTipSet() - for curTs != nil && curTs.Height() >= boundaryEpoch { - tsCid, err := curTs.Key().Cid() - if err != nil { - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - key := tsCid.String() - row = countMsgsStmt.QueryRow(key) - if err := row.Scan(&result); err != nil { - return xerrors.Errorf("error counting messages: %w", err) - } - - if result > 0 { - // found it! 
- boundaryEpoch = curTs.Height() + 1 - break - } - - // walk up - parents := curTs.Parents() - curTs, err = cs.GetTipSetFromKey(context.TODO(), parents) - if err != nil { - return xerrors.Errorf("error walking chain: %w", err) - } - } - - // delete everything above the minEpoch - if _, err = db.Exec(dbqDeleteMessagesByEpoch, int64(boundaryEpoch)); err != nil { - return xerrors.Errorf("error deleting stale reorged out message: %w", err) - } - - return nil -} - -func (x *msgIndex) prepareStatements() error { - stmt, err := x.db.Prepare(dbqGetMessageInfo) - if err != nil { - return xerrors.Errorf("prepare selectMsgStmt: %w", err) - } - x.selectMsgStmt = stmt - - stmt, err = x.db.Prepare(dbqInsertMessage) - if err != nil { - return xerrors.Errorf("prepare insertMsgStmt: %w", err) - } - x.insertMsgStmt = stmt - - stmt, err = x.db.Prepare(dbqDeleteTipsetMessages) - if err != nil { - return xerrors.Errorf("prepare deleteTipSetStmt: %w", err) - } - x.deleteTipSetStmt = stmt - - return nil -} - -// head change notifee -func (x *msgIndex) onHeadChange(rev, app []*types.TipSet) error { - x.closeLk.RLock() - defer x.closeLk.RUnlock() - - if x.closed { - return nil - } - - // do it in the background to avoid blocking head change processing - x.mx.Lock() - x.pend = append(x.pend, headChange{rev: rev, app: app}) - pendLen := len(x.pend) - x.mx.Unlock() - - // complain loudly if this is building backlog - if pendLen > 10 { - log.Warnf("message index head change processing is building backlog: %d pending head changes", pendLen) - } - - select { - case x.sema <- struct{}{}: - default: - } - - return nil -} - -func (x *msgIndex) background(ctx context.Context) { - defer x.workers.Done() - - for { - select { - case <-x.sema: - err := x.processHeadChanges(ctx) - if err != nil { - // we can't rely on an inconsistent index, so shut it down. 
- log.Errorf("error processing head change notifications: %s; shutting down message index", err) - if err2 := x.Close(); err2 != nil { - log.Errorf("error shutting down index: %s", err2) - } - } - - case <-ctx.Done(): - return - } - } -} - -func (x *msgIndex) processHeadChanges(ctx context.Context) error { - x.mx.Lock() - pend := x.pend - x.pend = nil - x.mx.Unlock() - - tx, err := x.db.Begin() - if err != nil { - return xerrors.Errorf("error creating transaction: %w", err) - } - - for _, hc := range pend { - for _, ts := range hc.rev { - if err := x.doRevert(ctx, tx, ts); err != nil { - if err2 := tx.Rollback(); err2 != nil { - log.Errorf("error rolling back transaction: %s", err2) - } - return xerrors.Errorf("error reverting %s: %w", ts, err) - } - } - - for _, ts := range hc.app { - if err := x.doApply(ctx, tx, ts); err != nil { - if err2 := tx.Rollback(); err2 != nil { - log.Errorf("error rolling back transaction: %s", err2) - } - return xerrors.Errorf("error applying %s: %w", ts, err) - } - } - } - - return tx.Commit() -} - -func (x *msgIndex) doRevert(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { - tskey, err := ts.Key().Cid() - if err != nil { - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - key := tskey.String() - _, err = tx.Stmt(x.deleteTipSetStmt).Exec(key) - return err -} - -func (x *msgIndex) doApply(ctx context.Context, tx *sql.Tx, ts *types.TipSet) error { - tscid, err := ts.Key().Cid() - if err != nil { - return xerrors.Errorf("error computing tipset cid: %w", err) - } - - tskey := tscid.String() - epoch := int64(ts.Height()) - - msgs, err := x.cs.MessagesForTipset(ctx, ts) - if err != nil { - return xerrors.Errorf("error retrieving messages for tipset %s: %w", ts, err) - } - - insertStmt := tx.Stmt(x.insertMsgStmt) - for _, msg := range msgs { - key := msg.Cid().String() - if _, err := insertStmt.Exec(key, tskey, epoch); err != nil { - return xerrors.Errorf("error inserting message: %w", err) - } - } - - return nil -} - -// interface -func (x *msgIndex) GetMsgInfo(ctx context.Context, m cid.Cid) (MsgInfo, error) { - x.closeLk.RLock() - defer x.closeLk.RUnlock() - - if x.closed { - return MsgInfo{}, ErrClosed - } - - var ( - tipset string - epoch int64 - ) - - key := m.String() - row := x.selectMsgStmt.QueryRow(key) - err := row.Scan(&tipset, &epoch) - switch { - case err == sql.ErrNoRows: - return MsgInfo{}, ErrNotFound - - case err != nil: - return MsgInfo{}, xerrors.Errorf("error querying msgindex database: %w", err) - } - - tipsetCid, err := cid.Decode(tipset) - if err != nil { - return MsgInfo{}, xerrors.Errorf("error decoding tipset cid: %w", err) - } - - return MsgInfo{ - Message: m, - TipSet: tipsetCid, - Epoch: abi.ChainEpoch(epoch), - }, nil -} - -func (x *msgIndex) Close() error { - x.closeLk.Lock() - defer x.closeLk.Unlock() - - if x.closed { - return nil - } - - x.closed = true - - x.cancel() - x.workers.Wait() - - return x.db.Close() -} - -// informal apis for itests; not exposed in the main interface -func (x *msgIndex) CountMessages() (int64, error) { - x.closeLk.RLock() - defer x.closeLk.RUnlock() - - if x.closed { - return 0, ErrClosed - } - - var result int64 - row := x.db.QueryRow(dbqCountMessages) - err := row.Scan(&result) - return result, err -} diff --git a/chain/index/msgindex_test.go b/chain/index/msgindex_test.go deleted file mode 100644 index 2cf707b0fed..00000000000 --- a/chain/index/msgindex_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package index - -import ( - "context" - "errors" - "math/rand" - "os" - 
"testing" - "time" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/types/mock" -) - -func TestBasicMsgIndex(t *testing.T) { - // the most basic of tests: - // 1. Create an index with mock chain store - // 2. Advance the chain for a few tipsets - // 3. Verify that the index contains all messages with the correct tipset/epoch - cs := newMockChainStore() - cs.genesis() - - tmp := t.TempDir() - t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) - require.NoError(t, err) - - defer msgIndex.Close() //nolint - - for i := 0; i < 10; i++ { - t.Logf("advance to epoch %d", i+1) - err := cs.advance() - require.NoError(t, err) - } - - waitForCoalescerAfterLastEvent() - - t.Log("verifying index") - verifyIndex(t, cs, msgIndex) -} - -func TestReorgMsgIndex(t *testing.T) { - // slightly more nuanced test that includes reorgs - // 1. Create an index with mock chain store - // 2. Advance/Reorg the chain for a few tipsets - // 3. Verify that the index contains all messages with the correct tipset/epoch - cs := newMockChainStore() - cs.genesis() - - tmp := t.TempDir() - t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) - require.NoError(t, err) - - defer msgIndex.Close() //nolint - - for i := 0; i < 10; i++ { - t.Logf("advance to epoch %d", i+1) - err := cs.advance() - require.NoError(t, err) - } - - waitForCoalescerAfterLastEvent() - - // a simple reorg - t.Log("doing reorg") - reorgme := cs.curTs - reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents()) - require.NoError(t, err) - cs.setHead(reorgmeParent) - reorgmeChild := cs.makeBlk() - err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild}) - require.NoError(t, err) - - waitForCoalescerAfterLastEvent() - - t.Log("verifying index") - verifyIndex(t, cs, msgIndex) - - t.Log("verifying that reorged messages are not present") - verifyMissing(t, cs, msgIndex, reorgme) -} - -func TestReconcileMsgIndex(t *testing.T) { - // test that exercises the reconciliation code paths - // 1. Create and populate a basic msgindex, similar to TestBasicMsgIndex. - // 2. Close it - // 3. Reorg the mock chain store - // 4. Reopen the index to trigger reconciliation - // 5. Enxure that only the stable messages remain. 
- cs := newMockChainStore() - cs.genesis() - - tmp := t.TempDir() - t.Cleanup(func() { _ = os.RemoveAll(tmp) }) - - msgIndex, err := NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) - require.NoError(t, err) - - for i := 0; i < 10; i++ { - t.Logf("advance to epoch %d", i+1) - err := cs.advance() - require.NoError(t, err) - } - - waitForCoalescerAfterLastEvent() - - // Close it and reorg - err = msgIndex.Close() - require.NoError(t, err) - cs.notify = nil - - // a simple reorg - t.Log("doing reorg") - reorgme := cs.curTs - reorgmeParent, err := cs.GetTipSetFromKey(context.Background(), reorgme.Parents()) - require.NoError(t, err) - cs.setHead(reorgmeParent) - reorgmeChild := cs.makeBlk() - err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild}) - require.NoError(t, err) - - // reopen to reconcile - msgIndex, err = NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) - require.NoError(t, err) - - defer msgIndex.Close() //nolint - - t.Log("verifying index") - // need to step one up because the last tipset is not known by the index - cs.setHead(reorgmeParent) - verifyIndex(t, cs, msgIndex) - - t.Log("verifying that reorged and unknown messages are not present") - verifyMissing(t, cs, msgIndex, reorgme, reorgmeChild) -} - -func verifyIndex(t *testing.T, cs *mockChainStore, msgIndex MsgIndex) { - for ts := cs.curTs; ts.Height() > 0; { - t.Logf("verify at height %d", ts.Height()) - blks := ts.Blocks() - if len(blks) == 0 { - break - } - - tsCid, err := ts.Key().Cid() - require.NoError(t, err) - - msgs, err := cs.MessagesForTipset(context.Background(), ts) - require.NoError(t, err) - for _, m := range msgs { - minfo, err := msgIndex.GetMsgInfo(context.Background(), m.Cid()) - require.NoError(t, err) - require.Equal(t, tsCid, minfo.TipSet) - require.Equal(t, ts.Height(), minfo.Epoch) - } - - parents := ts.Parents() - ts, err = cs.GetTipSetFromKey(context.Background(), parents) - require.NoError(t, err) - } -} - -func verifyMissing(t *testing.T, cs *mockChainStore, msgIndex MsgIndex, missing ...*types.TipSet) { - for _, ts := range missing { - msgs, err := cs.MessagesForTipset(context.Background(), ts) - require.NoError(t, err) - for _, m := range msgs { - _, err := msgIndex.GetMsgInfo(context.Background(), m.Cid()) - require.Equal(t, ErrNotFound, err) - } - } -} - -type mockChainStore struct { - notify store.ReorgNotifee - - curTs *types.TipSet - tipsets map[types.TipSetKey]*types.TipSet - msgs map[types.TipSetKey][]types.ChainMsg - - nonce uint64 -} - -var _ ChainStore = (*mockChainStore)(nil) - -var systemAddr address.Address -var rng *rand.Rand - -func init() { - systemAddr, _ = address.NewIDAddress(0) - rng = rand.New(rand.NewSource(314159)) - - // adjust those to make tests snappy - CoalesceMinDelay = 100 * time.Millisecond - CoalesceMaxDelay = time.Second - CoalesceMergeInterval = 100 * time.Millisecond -} - -func newMockChainStore() *mockChainStore { - return &mockChainStore{ - tipsets: make(map[types.TipSetKey]*types.TipSet), - msgs: make(map[types.TipSetKey][]types.ChainMsg), - } -} - -func (cs *mockChainStore) genesis() { - genBlock := mock.MkBlock(nil, 0, 0) - genTs := mock.TipSet(genBlock) - cs.msgs[genTs.Key()] = nil - cs.setHead(genTs) -} - -func (cs *mockChainStore) setHead(ts *types.TipSet) { - cs.curTs = ts - cs.tipsets[ts.Key()] = ts -} - -func (cs *mockChainStore) advance() error { - ts := cs.makeBlk() - return cs.reorg(nil, []*types.TipSet{ts}) -} - -func (cs *mockChainStore) reorg(rev, app []*types.TipSet) error { - for _, ts := range rev { - 
parents := ts.Parents() - cs.curTs = cs.tipsets[parents] - } - - for _, ts := range app { - cs.tipsets[ts.Key()] = ts - cs.curTs = ts - } - - if cs.notify != nil { - return cs.notify(rev, app) - } - - return nil -} - -func (cs *mockChainStore) makeBlk() *types.TipSet { - height := cs.curTs.Height() + 1 - - blk := mock.MkBlock(cs.curTs, uint64(height), uint64(height)) - blk.Messages = cs.makeGarbageCid() - - ts := mock.TipSet(blk) - msg1 := cs.makeMsg() - msg2 := cs.makeMsg() - cs.msgs[ts.Key()] = []types.ChainMsg{msg1, msg2} - - return ts -} - -func (cs *mockChainStore) makeMsg() *types.Message { - nonce := cs.nonce - cs.nonce++ - return &types.Message{To: systemAddr, From: systemAddr, Nonce: nonce} -} - -func (cs *mockChainStore) makeGarbageCid() cid.Cid { - garbage := blocks.NewBlock([]byte{byte(rng.Intn(256)), byte(rng.Intn(256)), byte(rng.Intn(256))}) - return garbage.Cid() -} - -func (cs *mockChainStore) SubscribeHeadChanges(f store.ReorgNotifee) { - cs.notify = f -} - -func (cs *mockChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { - msgs, ok := cs.msgs[ts.Key()] - if !ok { - return nil, errors.New("unknown tipset") - } - - return msgs, nil -} - -func (cs *mockChainStore) GetHeaviestTipSet() *types.TipSet { - return cs.curTs -} - -func (cs *mockChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { - ts, ok := cs.tipsets[tsk] - if !ok { - return nil, errors.New("unknown tipset") - } - return ts, nil -} - -func waitForCoalescerAfterLastEvent() { - // It can take up to CoalesceMinDelay for the coalescer timer to fire after the last event. - // When the timer fires, it can wait up to CoalesceMinDelay again for more events. - // Therefore the total wait is 2 * CoalesceMinDelay. - // Then we wait another second for the listener (the index) to actually process events. 
- time.Sleep(2*CoalesceMinDelay + time.Second) -} diff --git a/chain/index/pub_sub.go b/chain/index/pub_sub.go new file mode 100644 index 00000000000..a8dd8d05b7b --- /dev/null +++ b/chain/index/pub_sub.go @@ -0,0 +1,59 @@ +package index + +import "context" + +type updateSub struct { + ctx context.Context + cancel context.CancelFunc + + ch chan chainIndexUpdated +} + +type chainIndexUpdated struct{} + +func (si *SqliteIndexer) subscribeUpdates() (chan chainIndexUpdated, func()) { + subCtx, subCancel := context.WithCancel(si.ctx) + ch := make(chan chainIndexUpdated) + + si.mu.Lock() + subId := si.subIdCounter + si.subIdCounter++ + si.updateSubs[subId] = &updateSub{ + ctx: subCtx, + cancel: subCancel, + ch: ch, + } + si.mu.Unlock() + + unSubscribeF := func() { + si.mu.Lock() + if sub, ok := si.updateSubs[subId]; ok { + sub.cancel() + delete(si.updateSubs, subId) + } + si.mu.Unlock() + } + + return ch, unSubscribeF +} + +func (si *SqliteIndexer) notifyUpdateSubs() { + si.mu.Lock() + tSubs := make([]*updateSub, 0, len(si.updateSubs)) + for _, tSub := range si.updateSubs { + tSub := tSub + tSubs = append(tSubs, tSub) + } + si.mu.Unlock() + + for _, tSub := range tSubs { + tSub := tSub + select { + case tSub.ch <- chainIndexUpdated{}: + case <-tSub.ctx.Done(): + // subscription was cancelled, ignore + case <-si.ctx.Done(): + return + } + } +} diff --git a/chain/index/read.go b/chain/index/read.go new file mode 100644 index 00000000000..d7c00bd35cf --- /dev/null +++ b/chain/index/read.go @@ -0,0 +1,133 @@ +package index + +import ( + "context" + "database/sql" + "time" + + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/types/ethtypes" +) + +const headIndexedWaitTimeout = 5 * time.Second + +func (si *SqliteIndexer) GetCidFromHash(ctx context.Context, txHash ethtypes.EthHash) (cid.Cid, error) { + if si.isClosed() { + return cid.Undef, ErrClosed + } + + var msgCidBytes []byte + + if err := si.readWithHeadIndexWait(ctx, func() error { + return si.queryMsgCidFromEthHash(ctx, txHash, &msgCidBytes) + }); err != nil { + return cid.Undef, err + } + + msgCid, err := cid.Cast(msgCidBytes) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to cast message CID: %w", err) + } + + return msgCid, nil +} + +func (si *SqliteIndexer) queryMsgCidFromEthHash(ctx context.Context, txHash ethtypes.EthHash, msgCidBytes *[]byte) error { + return si.stmts.getMsgCidFromEthHashStmt.QueryRowContext(ctx, txHash.String()).Scan(msgCidBytes) +} + +func (si *SqliteIndexer) GetMsgInfo(ctx context.Context, messageCid cid.Cid) (*MsgInfo, error) { + if si.isClosed() { + return nil, ErrClosed + } + + var tipsetKeyCidBytes []byte + var height int64 + + if err := si.queryMsgInfo(ctx, messageCid, &tipsetKeyCidBytes, &height); err != nil { + return nil, err + } + + tipsetKey, err := cid.Cast(tipsetKeyCidBytes) + if err != nil { + return nil, xerrors.Errorf("failed to cast tipset key cid: %w", err) + } + + return &MsgInfo{ + Message: messageCid, + TipSet: tipsetKey, + Epoch: abi.ChainEpoch(height), + }, nil +} + +// This function attempts to read data using the provided readFunc. +// If the initial read returns no rows, it waits for the head to be indexed +// and tries again. This ensures that the most up-to-date data is checked. +// If no data is found after the second attempt, it returns ErrNotFound. 
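+//
+// Schematically (error handling elided; the actual implementation follows):
+//
+//	err := readFunc()                       // first attempt
+//	if err == sql.ErrNoRows {
+//		_ = si.waitTillHeadIndexed(ctx) // head may simply not be indexed yet
+//		err = readFunc()                // second and final attempt
+//	}
+//	// a second sql.ErrNoRows is mapped to ErrNotFound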
+func (si *SqliteIndexer) readWithHeadIndexWait(ctx context.Context, readFunc func() error) error { + err := readFunc() + if err == sql.ErrNoRows { + // not found, but may be in latest head, so wait for it and check again + if err := si.waitTillHeadIndexed(ctx); err != nil { + return xerrors.Errorf("failed while waiting for head to be indexed: %w", err) + } + err = readFunc() + } + + if err != nil { + if err == sql.ErrNoRows { + return ErrNotFound + } + return xerrors.Errorf("failed to read data from index: %w", err) + } + + return nil +} + +func (si *SqliteIndexer) queryMsgInfo(ctx context.Context, messageCid cid.Cid, tipsetKeyCidBytes *[]byte, height *int64) error { + return si.stmts.getNonRevertedMsgInfoStmt.QueryRowContext(ctx, messageCid.Bytes()).Scan(tipsetKeyCidBytes, height) +} + +func (si *SqliteIndexer) waitTillHeadIndexed(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, headIndexedWaitTimeout) + defer cancel() + + head := si.cs.GetHeaviestTipSet() + headTsKeyCidBytes, err := toTipsetKeyCidBytes(head) + if err != nil { + return xerrors.Errorf("failed to get tipset key cid: %w", err) + } + + // wait till it is indexed + subCh, unsubFn := si.subscribeUpdates() + defer unsubFn() + + for ctx.Err() == nil { + exists, err := si.isTipsetIndexed(ctx, headTsKeyCidBytes) + if err != nil { + return xerrors.Errorf("failed to check if tipset exists: %w", err) + } else if exists { + return nil + } + + select { + case <-subCh: + // Continue to next iteration to check again + case <-ctx.Done(): + return ctx.Err() + } + } + return ctx.Err() +} + +func (si *SqliteIndexer) isTipsetIndexed(ctx context.Context, tsKeyCidBytes []byte) (bool, error) { + var exists bool + if err := si.stmts.hasTipsetStmt.QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists); err != nil { + return false, xerrors.Errorf("failed to check if tipset is indexed: %w", err) + } + return exists, nil +} diff --git a/chain/index/read_test.go b/chain/index/read_test.go new file mode 100644 index 00000000000..ebb6d1acf41 --- /dev/null +++ b/chain/index/read_test.go @@ -0,0 +1,292 @@ +package index + +import ( + "context" + "errors" + pseudo "math/rand" + "sync" + "testing" + "time" + + "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/ethtypes" +) + +func TestGetCidFromHash(t *testing.T) { + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + ctx := context.Background() + + s, _, _ := setupWithHeadIndexed(t, 10, rng) + + ethTxHash := ethtypes.EthHash([32]byte{1}) + msgCid := randomCid(t, rng) + + // read from empty db -> ErrNotFound + c, err := s.GetCidFromHash(ctx, ethTxHash) + require.Error(t, err) + require.True(t, errors.Is(err, ErrNotFound)) + require.EqualValues(t, cid.Undef, c) + + // insert and read + insertEthTxHash(t, s, ethTxHash, msgCid) + c, err = s.GetCidFromHash(ctx, ethTxHash) + require.NoError(t, err) + require.EqualValues(t, msgCid, c) + + // look up some other hash -> fails + c, err = s.GetCidFromHash(ctx, ethtypes.EthHash([32]byte{2})) + require.Error(t, err) + require.True(t, errors.Is(err, ErrNotFound)) + require.EqualValues(t, cid.Undef, c) +} + +func TestGetMsgInfo(t *testing.T) { + 
ctx := context.Background() + seed := time.Now().UnixNano() + t.Logf("seed: %d", seed) + rng := pseudo.New(pseudo.NewSource(seed)) + s, _, _ := setupWithHeadIndexed(t, 10, rng) + msgCid := randomCid(t, rng) + msgCidBytes := msgCid.Bytes() + tsKeyCid := randomCid(t, rng) + + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: tsKeyCid.Bytes(), + height: uint64(1), + reverted: false, + messageCid: msgCidBytes, + messageIndex: 1, + }) + + mi, err := s.GetMsgInfo(ctx, msgCid) + require.NoError(t, err) + require.Equal(t, msgCid, mi.Message) + require.Equal(t, tsKeyCid, mi.TipSet) + require.Equal(t, abi.ChainEpoch(1), mi.Epoch) +} + +func setupWithHeadIndexed(t *testing.T, headHeight abi.ChainEpoch, rng *pseudo.Rand) (*SqliteIndexer, *types.TipSet, *dummyChainStore) { + head := fakeTipSet(t, rng, headHeight, []cid.Cid{}) + d := newDummyChainStore() + d.SetHeaviestTipSet(head) + + s, err := NewSqliteIndexer(":memory:", d, 0, false, 0) + require.NoError(t, err) + insertHead(t, s, head, headHeight) + + return s, head, d +} + +func insertHead(t *testing.T, s *SqliteIndexer, head *types.TipSet, height abi.ChainEpoch) { + headKeyBytes, err := toTipsetKeyCidBytes(head) + require.NoError(t, err) + + insertTipsetMessage(t, s, tipsetMessage{ + tipsetKeyCid: headKeyBytes, + height: uint64(height), + reverted: false, + messageCid: nil, + messageIndex: -1, + }) +} + +func insertEthTxHash(t *testing.T, s *SqliteIndexer, ethTxHash ethtypes.EthHash, messageCid cid.Cid) { + msgCidBytes := messageCid.Bytes() + + res, err := s.stmts.insertEthTxHashStmt.Exec(ethTxHash.String(), msgCidBytes) + require.NoError(t, err) + rowsAffected, err := res.RowsAffected() + require.NoError(t, err) + require.Equal(t, int64(1), rowsAffected) +} + +type dummyChainStore struct { + mu sync.RWMutex + + heightToTipSet map[abi.ChainEpoch]*types.TipSet + messagesForTipset map[*types.TipSet][]types.ChainMsg + keyToTipSet map[types.TipSetKey]*types.TipSet + tipsetCidToTipset map[cid.Cid]*types.TipSet + + heaviestTipSet *types.TipSet + messagesForBlock func(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) + actorStore func(ctx context.Context) adt.Store +} + +func newDummyChainStore() *dummyChainStore { + return &dummyChainStore{ + heightToTipSet: make(map[abi.ChainEpoch]*types.TipSet), + messagesForTipset: make(map[*types.TipSet][]types.ChainMsg), + keyToTipSet: make(map[types.TipSetKey]*types.TipSet), + tipsetCidToTipset: make(map[cid.Cid]*types.TipSet), + } +} + +func (d *dummyChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + msgs, ok := d.messagesForTipset[ts] + if !ok { + return nil, nil + } + return msgs, nil +} + +func (d *dummyChainStore) GetHeaviestTipSet() *types.TipSet { + d.mu.RLock() + defer d.mu.RUnlock() + return d.heaviestTipSet +} + +func (d *dummyChainStore) GetTipSetByCid(_ context.Context, tsKeyCid cid.Cid) (*types.TipSet, error) { + d.mu.RLock() + defer d.mu.RUnlock() + if _, ok := d.tipsetCidToTipset[tsKeyCid]; !ok { + return nil, errors.New("not found") + } + return d.tipsetCidToTipset[tsKeyCid], nil +} + +func (d *dummyChainStore) SetTipSetByCid(t *testing.T, ts *types.TipSet) { + d.mu.Lock() + defer d.mu.Unlock() + + tsKeyCid, err := ts.Key().Cid() + require.NoError(t, err) + d.tipsetCidToTipset[tsKeyCid] = ts +} + +func (d *dummyChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + return 
d.keyToTipSet[tsk], nil +} + +func (d *dummyChainStore) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { + d.mu.RLock() + defer d.mu.RUnlock() + if d.messagesForBlock != nil { + return d.messagesForBlock(ctx, b) + } + return nil, nil, nil +} + +func (d *dummyChainStore) ActorStore(ctx context.Context) adt.Store { + d.mu.RLock() + defer d.mu.RUnlock() + if d.actorStore != nil { + return d.actorStore(ctx) + } + return nil +} + +func (d *dummyChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, _ *types.TipSet, prev bool) (*types.TipSet, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + ts, ok := d.heightToTipSet[h] + if !ok { + return nil, errors.New("tipset not found") + } + return ts, nil +} + +func (d *dummyChainStore) IsStoringEvents() bool { + return true +} + +// Setter methods to configure the mock + +func (d *dummyChainStore) SetMessagesForTipset(ts *types.TipSet, msgs []types.ChainMsg) { + d.mu.Lock() + defer d.mu.Unlock() + d.messagesForTipset[ts] = msgs +} + +func (d *dummyChainStore) SetHeaviestTipSet(ts *types.TipSet) { + d.mu.Lock() + defer d.mu.Unlock() + d.heaviestTipSet = ts +} + +func (d *dummyChainStore) SetTipsetByHeightAndKey(h abi.ChainEpoch, tsk types.TipSetKey, ts *types.TipSet) { + d.mu.Lock() + defer d.mu.Unlock() + + d.heightToTipSet[h] = ts + d.keyToTipSet[tsk] = ts +} + +func randomIDAddr(tb testing.TB, rng *pseudo.Rand) address.Address { + tb.Helper() + addr, err := address.NewIDAddress(uint64(rng.Int63())) + require.NoError(tb, err) + return addr +} + +func randomCid(tb testing.TB, rng *pseudo.Rand) cid.Cid { + tb.Helper() + cb := cid.V1Builder{Codec: cid.Raw, MhType: mh.IDENTITY} + c, err := cb.Sum(randomBytes(10, rng)) + require.NoError(tb, err) + return c +} + +func randomBytes(n int, rng *pseudo.Rand) []byte { + buf := make([]byte, n) + rng.Read(buf) + return buf +} + +func fakeTipSet(tb testing.TB, rng *pseudo.Rand, h abi.ChainEpoch, parents []cid.Cid) *types.TipSet { + tb.Helper() + ts, err := types.NewTipSet([]*types.BlockHeader{ + { + Height: h, + Miner: randomIDAddr(tb, rng), + + Parents: parents, + + Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, + + ParentStateRoot: randomCid(tb, rng), + Messages: randomCid(tb, rng), + ParentMessageReceipts: randomCid(tb, rng), + + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + }, + { + Height: h, + Miner: randomIDAddr(tb, rng), + + Parents: parents, + + Ticket: &types.Ticket{VRFProof: []byte{byte((h + 1) % 2)}}, + + ParentStateRoot: randomCid(tb, rng), + Messages: randomCid(tb, rng), + ParentMessageReceipts: randomCid(tb, rng), + + BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, + BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, + }, + }) + + require.NoError(tb, err) + + return ts +} diff --git a/chain/index/reconcile.go b/chain/index/reconcile.go new file mode 100644 index 00000000000..72a1e6ecaa6 --- /dev/null +++ b/chain/index/reconcile.go @@ -0,0 +1,276 @@ +package index + +import ( + "context" + "database/sql" + + ipld "github.com/ipfs/go-ipld-format" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/types" +) + +// ReconcileWithChain ensures that the index is consistent with the current chain state. +// It performs the following steps: +// 1. Checks if the index is empty. 
If so, it returns immediately as there's nothing to reconcile. +// 2. Finds the lowest non-reverted height in the index. +// 3. Walks backwards from the current chain head until it finds a tipset that exists +// in the index and is not marked as reverted. +// 4. Sets a boundary epoch just above this found tipset. +// 5. Marks all tipsets above this boundary as reverted, ensuring consistency with the current chain state. +// 6. Applies all missing un-indexed tipsets, starting from the last tipset shared between the index and the canonical chain +// and ending at the current chain head. +// +// This function is crucial for maintaining index integrity, especially after chain reorgs. +// It ensures that the index accurately reflects the current state of the blockchain. +func (si *SqliteIndexer) ReconcileWithChain(ctx context.Context, head *types.TipSet) error { + if !si.cs.IsStoringEvents() { + log.Warn("chain indexer is not storing events during reconciliation; please ensure this is intentional") + } + + if si.isClosed() { + return ErrClosed + } + + if head == nil { + return nil + } + + return withTx(ctx, si.db, func(tx *sql.Tx) error { + var isIndexEmpty bool + err := tx.StmtContext(ctx, si.stmts.isIndexEmptyStmt).QueryRowContext(ctx).Scan(&isIndexEmpty) + if err != nil { + return xerrors.Errorf("failed to check if index is empty: %w", err) + } + + if isIndexEmpty && !si.reconcileEmptyIndex { + log.Info("chain index is empty and reconcileEmptyIndex is disabled; skipping reconciliation") + return nil + } + + if isIndexEmpty { + log.Info("chain index is empty; backfilling from head") + return si.backfillIndex(ctx, tx, head, 0) + } + + reconciliationEpoch, err := si.getReconciliationEpoch(ctx, tx) + if err != nil { + return xerrors.Errorf("failed to get reconciliation epoch: %w", err) + } + + currTs := head + + log.Infof("starting chain reconciliation from head height %d; reconciliation epoch is %d", head.Height(), reconciliationEpoch) + + // The goal here is to walk the canonical chain backwards from head until we find a matching non-reverted tipset + // in the DB, so we know where to start reconciliation from. + // All tipsets that exist in the DB but not in the canonical chain are then marked as reverted. + // All tipsets that exist in the canonical chain but not in the DB are then applied. + + // we only need to walk back as far as the reconciliation epoch as all the tipsets in the index + // below the reconciliation epoch are already marked as reverted because the reconciliation epoch + // is the minimum non-reverted height in the index + for currTs != nil && currTs.Height() >= reconciliationEpoch { + tsKeyCidBytes, err := toTipsetKeyCidBytes(currTs) + if err != nil { + return xerrors.Errorf("failed to compute tipset cid: %w", err) + } + + var exists bool + err = tx.StmtContext(ctx, si.stmts.hasNonRevertedTipsetStmt).QueryRowContext(ctx, tsKeyCidBytes).Scan(&exists) + if err != nil { + return xerrors.Errorf("failed to check if tipset exists and is not reverted: %w", err) + } + + if exists { + // found it! 
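+ // A non-reverted match means everything at or below this tipset is already
+ // consistent with the canonical chain, so reconciliation resumes one epoch
+ // above it: tipsets at or above reconciliationEpoch are marked reverted and
+ // then re-applied from the canonical chain below.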
+ reconciliationEpoch = currTs.Height() + 1 + log.Infof("found matching tipset at height %d, setting reconciliation epoch to %d", currTs.Height(), reconciliationEpoch) + break + } + + if currTs.Height() == 0 { + log.Infof("ReconcileWithChain reached genesis but no matching tipset found in index") + break + } + + parents := currTs.Parents() + currTs, err = si.cs.GetTipSetFromKey(ctx, parents) + if err != nil { + return xerrors.Errorf("failed to walk chain: %w", err) + } + } + + if currTs.Height() == 0 { + log.Warn("ReconcileWithChain reached genesis without finding matching tipset") + } + + // mark all tipsets from the reconciliation epoch onwards in the index as reverted as they are not in the current canonical chain + log.Infof("Marking tipsets as reverted from height %d", reconciliationEpoch) + result, err := tx.StmtContext(ctx, si.stmts.updateTipsetsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch)) + if err != nil { + return xerrors.Errorf("failed to mark tipsets as reverted: %w", err) + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return xerrors.Errorf("failed to get number of rows affected: %w", err) + } + + // also need to mark events as reverted for the corresponding inclusion tipsets + if _, err = tx.StmtContext(ctx, si.stmts.updateEventsToRevertedFromHeightStmt).ExecContext(ctx, int64(reconciliationEpoch-1)); err != nil { + return xerrors.Errorf("failed to mark events as reverted: %w", err) + } + + log.Infof("marked %d tipsets as reverted from height %d", rowsAffected, reconciliationEpoch) + + // if the head is less than the reconciliation epoch, we don't need to index any tipsets as we're already caught up + if head.Height() < reconciliationEpoch { + log.Info("no missing tipsets to index; index is already caught up with chain") + return nil + } + + // apply all missing tipsets by walking the chain backwards starting from head up to the reconciliation epoch + log.Infof("indexing missing tipsets backwards from head height %d to reconciliation epoch %d", head.Height(), reconciliationEpoch) + + // if head.Height == reconciliationEpoch, this will only index head and return + if err := si.backfillIndex(ctx, tx, head, reconciliationEpoch); err != nil { + return xerrors.Errorf("failed to backfill index: %w", err) + } + + return nil + }) +} + +func (si *SqliteIndexer) getReconciliationEpoch(ctx context.Context, tx *sql.Tx) (abi.ChainEpoch, error) { + var reconciliationEpochInIndex sql.NullInt64 + + err := tx.StmtContext(ctx, si.stmts.getMinNonRevertedHeightStmt). + QueryRowContext(ctx). + Scan(&reconciliationEpochInIndex) + + if err != nil { + if err == sql.ErrNoRows { + log.Warn("index only contains reverted tipsets; setting reconciliation epoch to 0") + return 0, nil + } + return 0, xerrors.Errorf("failed to scan minimum non-reverted height: %w", err) + } + + if !reconciliationEpochInIndex.Valid { + log.Warn("index only contains reverted tipsets; setting reconciliation epoch to 0") + return 0, nil + } + + return abi.ChainEpoch(reconciliationEpochInIndex.Int64), nil +} + +// backfillIndex backfills the chain index with missing tipsets starting from the given head tipset +// and stopping at the specified stopAfter epoch (inclusive). +// +// The behavior of this function depends on the relationship between head.Height and stopAfter: +// +// 1. If head.Height > stopAfter: +// - The function will apply missing tipsets from head.Height down to stopAfter (inclusive). 
+// - It will stop applying tipsets if the maximum number of tipsets to apply (si.maxReconcileTipsets) is reached. +// - If the chain store only contains data up to a certain height, the function will stop backfilling at that height. +// +// 2. If head.Height == stopAfter: +// - The function will only apply the head tipset and then return. +// +// 3. If head.Height < stopAfter: +// - The function will immediately return without applying any tipsets. +// +// The si.maxReconcileTipsets parameter is used to limit the maximum number of tipsets that can be applied during the backfill process. +// If the number of applied tipsets reaches si.maxReconcileTipsets, the function will stop backfilling and return. +// +// The function also logs progress information at regular intervals (every builtin.EpochsInDay) to provide visibility into the backfill process. +func (si *SqliteIndexer) backfillIndex(ctx context.Context, tx *sql.Tx, head *types.TipSet, stopAfter abi.ChainEpoch) error { + if head.Height() < stopAfter { + return nil + } + + currTs := head + totalApplied := uint64(0) + lastLoggedEpoch := head.Height() + + log.Infof("backfilling chain index backwards starting from head height %d", head.Height()) + + // Calculate the actual number of tipsets to apply + totalTipsetsToApply := min(uint64(head.Height()-stopAfter+1), si.maxReconcileTipsets) + + for currTs != nil { + if totalApplied >= si.maxReconcileTipsets { + log.Infof("reached maximum number of tipsets to apply (%d), finishing backfill; backfill applied %d tipsets", + si.maxReconcileTipsets, totalApplied) + return nil + } + + err := si.applyMissingTipset(ctx, tx, currTs) + if err != nil { + if ipld.IsNotFound(err) { + log.Infof("stopping backfill at height %d as chain store only contains data up to this height as per error %s; backfill applied %d tipsets", + currTs.Height(), err, totalApplied) + return nil + } + + return xerrors.Errorf("failed to apply tipset at height %d: %w", currTs.Height(), err) + } + + totalApplied++ + + if lastLoggedEpoch-currTs.Height() >= builtin.EpochsInDay { + progress := float64(totalApplied) / float64(totalTipsetsToApply) * 100 + log.Infof("backfill progress: %.2f%% complete (%d out of %d tipsets applied), ongoing", progress, totalApplied, totalTipsetsToApply) + lastLoggedEpoch = currTs.Height() + } + + if currTs.Height() == 0 { + log.Infof("reached genesis tipset and have backfilled everything up to genesis; backfilled %d tipsets", totalApplied) + return nil + } + + if currTs.Height() <= stopAfter { + log.Infof("reached stop height %d; backfilled %d tipsets", stopAfter, totalApplied) + return nil + } + + currTs, err = si.cs.GetTipSetFromKey(ctx, currTs.Parents()) + if err != nil { + return xerrors.Errorf("failed to walk chain at height %d: %w", currTs.Height(), err) + } + } + + log.Infof("applied %d tipsets during backfill", totalApplied) + return nil +} + +// applyMissingTipset indexes a single missing tipset and its parent events +// It's a simplified version of applyMissingTipsets, handling one tipset at a time +func (si *SqliteIndexer) applyMissingTipset(ctx context.Context, tx *sql.Tx, currTs *types.TipSet) error { + if currTs == nil { + return xerrors.Errorf("failed to apply missing tipset: tipset is nil") + } + + // Special handling for genesis tipset + if currTs.Height() == 0 { + if err := si.indexTipset(ctx, tx, currTs); err != nil { + return xerrors.Errorf("failed to index genesis tipset: %w", err) + } + return nil + } + + parentTs, err := si.cs.GetTipSetFromKey(ctx, currTs.Parents()) + if err != nil { 
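+ // A missing parent here typically means the chain store has no data below
+ // this height; backfillIndex detects that case via ipld.IsNotFound on the
+ // wrapped error and treats it as the floor of the backfill rather than a failure.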
+ return xerrors.Errorf("failed to get parent tipset: %w", err) + } + + // Index the tipset along with its parent events + if err := si.indexTipsetWithParentEvents(ctx, tx, parentTs, currTs); err != nil { + return xerrors.Errorf("failed to index tipset with parent events: %w", err) + } + + return nil +} diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index 8c022755371..1c9183731f3 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -36,7 +36,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" . "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" @@ -169,7 +168,7 @@ func TestForkHeightTriggers(t *testing.T) { } return st.Flush(ctx) - }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex) + }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), nil) if err != nil { t.Fatal(err) } @@ -287,7 +286,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) { root cid.Cid, height abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { migrationCount++ return root, nil - }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex) + }}}, cg.BeaconSchedule(), datastore.NewMapDatastore(), nil) if err != nil { t.Fatal(err) } @@ -519,7 +518,7 @@ func TestForkPreMigration(t *testing.T) { return nil }, }}}, - }, cg.BeaconSchedule(), datastore.NewMapDatastore(), index.DummyMsgIndex) + }, cg.BeaconSchedule(), datastore.NewMapDatastore(), nil) if err != nil { t.Fatal(err) } @@ -594,7 +593,7 @@ func TestDisablePreMigration(t *testing.T) { }, cg.BeaconSchedule(), datastore.NewMapDatastore(), - index.DummyMsgIndex, + nil, ) require.NoError(t, err) require.NoError(t, sm.Start(context.Background())) @@ -649,7 +648,7 @@ func TestMigrtionCache(t *testing.T) { }, cg.BeaconSchedule(), metadataDs, - index.DummyMsgIndex, + nil, ) require.NoError(t, err) require.NoError(t, sm.Start(context.Background())) @@ -702,7 +701,7 @@ func TestMigrtionCache(t *testing.T) { }, cg.BeaconSchedule(), metadataDs, - index.DummyMsgIndex, + nil, ) require.NoError(t, err) sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (vm.Interface, error) { diff --git a/chain/stmgr/searchwait.go b/chain/stmgr/searchwait.go index 3377389b9b6..288a5bfd082 100644 --- a/chain/stmgr/searchwait.go +++ b/chain/stmgr/searchwait.go @@ -190,7 +190,10 @@ func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet } func (sm *StateManager) searchForIndexedMsg(ctx context.Context, mcid cid.Cid, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, cid.Cid, error) { - minfo, err := sm.msgIndex.GetMsgInfo(ctx, mcid) + if sm.chainIndexer == nil { + return nil, nil, cid.Undef, index.ErrNotFound + } + minfo, err := sm.chainIndexer.GetMsgInfo(ctx, mcid) if err != nil { return nil, nil, cid.Undef, xerrors.Errorf("error looking up message in index: %w", err) } diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index 49be6fdaec4..5b227fe922e 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -156,7 +156,7 @@ type StateManager struct { tsExecMonitor ExecMonitor beacon beacon.Schedule - msgIndex index.MsgIndex + chainIndexer index.Indexer // We keep a small cache for calls to ExecutionTrace which helps improve // performance for node operators like exchanges and block 
explorers @@ -177,7 +177,8 @@ type tipSetCacheEntry struct { invocTrace []*api.InvocResult } -func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) { +func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule, + metadataDs dstore.Batching, chainIndexer index.Indexer) (*StateManager, error) { // If we have upgrades, make sure they're in-order and make sense. if err := us.Validate(); err != nil { return nil, err @@ -242,13 +243,13 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, tree: nil, }, compWait: make(map[string]chan struct{}), - msgIndex: msgIndex, + chainIndexer: chainIndexer, execTraceCache: execTraceCache, }, nil } -func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, msgIndex index.MsgIndex) (*StateManager, error) { - sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex) +func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor, metadataDs dstore.Batching, chainIndexer index.Indexer) (*StateManager, error) { + sm, err := NewStateManager(cs, exec, sys, us, b, metadataDs, chainIndexer) if err != nil { return nil, err } diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 1ecfc474a02..81569149c01 100644 --- a/chain/store/store_test.go +++ b/chain/store/store_test.go @@ -19,7 +19,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -216,7 +215,7 @@ func TestChainExportImportFull(t *testing.T) { t.Fatal("imported chain differed from exported chain") } - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule(), ds, nil) if err != nil { t.Fatal(err) } diff --git a/chain/types/index.go b/chain/types/index.go new file mode 100644 index 00000000000..93c44ad2e43 --- /dev/null +++ b/chain/types/index.go @@ -0,0 +1,23 @@ +package types + +import ( + "github.com/filecoin-project/go-state-types/abi" +) + +// IndexValidation contains detailed information about the validation status of a specific chain epoch. +type IndexValidation struct { + // TipSetKey is the key of the canonical tipset for this epoch. + TipSetKey TipSetKey + // Height is the epoch height at which the validation is performed. + Height abi.ChainEpoch + // IndexedMessagesCount is the number of indexed messages for the canonical tipset at this epoch. + IndexedMessagesCount uint64 + // IndexedEventsCount is the number of indexed events for the canonical tipset at this epoch. + IndexedEventsCount uint64 + // IndexedEventEntriesCount is the number of indexed event entries for the canonical tipset at this epoch. 
+ IndexedEventEntriesCount uint64 + // Backfilled denotes whether missing data was successfully backfilled into the index during validation. + Backfilled bool + // IsNullRound indicates if the epoch corresponds to a null round and therefore does not have any indexed messages or events. + IsNullRound bool +} diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index 500ef4af3ed..46f2411bf86 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -35,7 +35,6 @@ import ( badgerbs "github.com/filecoin-project/lotus/blockstore/badger" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/proofs" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" @@ -229,7 +228,8 @@ var importBenchCmd = &cli.Command{ defer cs.Close() //nolint:errcheck // TODO: We need to supply the actual beacon after v14 - stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, metadataDs, index.DummyMsgIndex) + stm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil, + metadataDs, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index 8d003950fe1..b5b55a8ce18 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -35,7 +35,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/gen/genesis" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" @@ -514,7 +513,7 @@ var chainBalanceStateCmd = &cli.Command{ cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return err } @@ -738,7 +737,8 @@ var chainPledgeCmd = &cli.Command{ cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/chain_index.go b/cmd/lotus-shed/chain_index.go new file mode 100644 index 00000000000..53cc06d2b61 --- /dev/null +++ b/cmd/lotus-shed/chain_index.go @@ -0,0 +1,213 @@ +package main + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + + lcli "github.com/filecoin-project/lotus/cli" +) + +var chainIndexCmds = &cli.Command{ + Name: "chainindex", + Usage: "Commands related to managing the 
chainindex", + Subcommands: []*cli.Command{ + validateBackfillChainIndexCmd, + }, +} + +var validateBackfillChainIndexCmd = &cli.Command{ + Name: "validate-backfill", + Usage: "Validates and optionally backfills the chainindex for a range of epochs", + Description: ` +lotus-shed chainindex validate-backfill --from --to [--backfill] [--log-good] [--quiet] + +The command validates the chain index entries for each epoch in the specified range, checking for missing or +inconsistent entries (i.e. the indexed data does not match the actual chain state). If '--backfill' is enabled +(which it is by default), it will attempt to backfill any missing entries using the 'ChainValidateIndex' API. + +Error conditions: + - If 'from' or 'to' are invalid (<=0 or 'to' > 'from'), an error is returned. + - If the 'ChainValidateIndex' API returns an error for an epoch, indicating an inconsistency between the index + and chain state, an error message is logged for that epoch. + +Logging: + - Progress is logged every 2880 epochs (1 day worth of epochs) processed during the validation process. + - If '--log-good' is enabled, details are also logged for each epoch that has no detected problems. This includes: + - Null rounds with no messages/events. + - Epochs with a valid indexed entry. + - If --quiet is enabled, only errors are logged, unless --log-good is also enabled, in which case good tipsets + are also logged. + +Example usage: + +To validate and backfill the chain index for the last 5760 epochs (2 days) and log details for all epochs: + +lotus-shed chainindex validate-backfill --from 1000000 --to 994240 --log-good + +This command is useful for backfilling the chain index over a range of historical epochs during the migration to +the new ChainIndexer. It can also be run periodically to validate the index's integrity using system schedulers +like cron. + +If there are any errors during the validation process, the command will exit with a non-zero status and log the +number of failed RPC calls. Otherwise, it will exit with a zero status. 
+ `, + Flags: []cli.Flag{ + &cli.IntFlag{ + Name: "from", + Usage: "from specifies the starting tipset epoch for validation (inclusive)", + Required: true, + }, + &cli.IntFlag{ + Name: "to", + Usage: "to specifies the ending tipset epoch for validation (inclusive)", + Required: true, + }, + &cli.BoolFlag{ + Name: "backfill", + Usage: "backfill determines whether to backfill missing index entries during validation (default: true)", + Value: true, + }, + &cli.BoolFlag{ + Name: "log-good", + Usage: "log tipsets that have no detected problems", + Value: false, + }, + &cli.BoolFlag{ + Name: "quiet", + Usage: "suppress output except for errors (or good tipsets if log-good is enabled)", + }, + }, + Action: func(cctx *cli.Context) error { + srv, err := lcli.GetFullNodeServices(cctx) + if err != nil { + return xerrors.Errorf("failed to get full node services: %w", err) + } + defer func() { + if closeErr := srv.Close(); closeErr != nil { + log.Errorf("error closing services: %s", closeErr) + } + }() + + api := srv.FullNodeAPI() + ctx := lcli.ReqContext(cctx) + + fromEpoch := cctx.Int("from") + if fromEpoch <= 0 { + return xerrors.Errorf("invalid from epoch: %d, must be greater than 0", fromEpoch) + } + + toEpoch := cctx.Int("to") + if toEpoch <= 0 { + return xerrors.Errorf("invalid to epoch: %d, must be greater than 0", toEpoch) + } + if toEpoch > fromEpoch { + return xerrors.Errorf("to epoch (%d) must be less than or equal to from epoch (%d)", toEpoch, fromEpoch) + } + + head, err := api.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("failed to get chain head: %w", err) + } + if head.Height() <= abi.ChainEpoch(fromEpoch) { + return xerrors.Errorf("from epoch (%d) must be less than chain head (%d)", fromEpoch, head.Height()) + } + + backfill := cctx.Bool("backfill") + + // Results Tracking + logGood := cctx.Bool("log-good") + + quiet := cctx.Bool("quiet") + + failedRPCs := 0 + successfulBackfills := 0 + successfulValidations := 0 + successfulNullRounds := 0 + + startTime := time.Now() + if !quiet { + _, _ = fmt.Fprintf(cctx.App.Writer, "%s starting chainindex validation; from epoch: %d; to epoch: %d; backfill: %t; log-good: %t\n", currentTimeString(), + fromEpoch, toEpoch, backfill, logGood) + } + totalEpochs := fromEpoch - toEpoch + 1 + haltHeight := -1 + + for epoch := fromEpoch; epoch >= toEpoch; epoch-- { + if ctx.Err() != nil { + return ctx.Err() + } + + if (fromEpoch-epoch+1)%2880 == 0 || epoch == toEpoch { + progress := float64(fromEpoch-epoch+1) / float64(totalEpochs) * 100 + elapsed := time.Since(startTime) + if !quiet { + _, _ = fmt.Fprintf(cctx.App.ErrWriter, "%s -------- Chain index validation progress: %.2f%%; Time elapsed: %s\n", + currentTimeString(), progress, elapsed) + } + } + + indexValidateResp, err := api.ChainValidateIndex(ctx, abi.ChainEpoch(epoch), backfill) + if err != nil { + if strings.Contains(err.Error(), "chain store does not contain data") { + haltHeight = epoch + break + } + + _, _ = fmt.Fprintf(cctx.App.Writer, "%s ✗ Epoch %d; failure: %s\n", currentTimeString(), epoch, err) + failedRPCs++ + continue + } + + if indexValidateResp.Backfilled { + successfulBackfills++ + } else if indexValidateResp.IsNullRound { + successfulNullRounds++ + } else { + successfulValidations++ + } + + if !logGood { + continue + } + + if indexValidateResp.IsNullRound { + _, _ = fmt.Fprintf(cctx.App.Writer, "%s ✓ Epoch %d; null round\n", currentTimeString(), epoch) + } else { + jsonData, err := json.Marshal(indexValidateResp) + if err != nil { + return fmt.Errorf("failed to marshal 
results to JSON: %w", err) + } + + _, _ = fmt.Fprintf(cctx.App.Writer, "%s ✓ Epoch %d (%s)\n", currentTimeString(), epoch, string(jsonData)) + } + } + + if !quiet { + _, _ = fmt.Fprintf(cctx.App.Writer, "\n%s Chain index validation summary:\n", currentTimeString()) + _, _ = fmt.Fprintf(cctx.App.Writer, "Total failed RPC calls: %d\n", failedRPCs) + _, _ = fmt.Fprintf(cctx.App.Writer, "Total successful backfills: %d\n", successfulBackfills) + _, _ = fmt.Fprintf(cctx.App.Writer, "Total successful validations without backfilling: %d\n", successfulValidations) + _, _ = fmt.Fprintf(cctx.App.Writer, "Total successful null round validations: %d\n", successfulNullRounds) + } + + if haltHeight >= 0 { + return fmt.Errorf("chain index validation and backfill halted at height %d as the chain state does not contain data for that height", haltHeight) + } else if failedRPCs > 0 { + return fmt.Errorf("chain index validation failed with %d RPC errors", failedRPCs) + } + + return nil + }, +} + +func currentTimeString() string { + currentTime := time.Now().Format("2006-01-02 15:04:05.000") + return currentTime +} diff --git a/cmd/lotus-shed/gas-estimation.go b/cmd/lotus-shed/gas-estimation.go index 85573ddb9a0..b1c61b62f2c 100644 --- a/cmd/lotus-shed/gas-estimation.go +++ b/cmd/lotus-shed/gas-estimation.go @@ -19,7 +19,6 @@ import ( "github.com/filecoin-project/lotus/chain/beacon/drand" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -107,7 +106,8 @@ var gasTraceCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) defer cs.Close() //nolint:errcheck - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), + shd, mds, nil) if err != nil { return err } @@ -203,7 +203,8 @@ var replayOfflineCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) defer cs.Close() //nolint:errcheck - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), + shd, mds, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/indexes.go b/cmd/lotus-shed/indexes.go deleted file mode 100644 index e1c9be182fe..00000000000 --- a/cmd/lotus-shed/indexes.go +++ /dev/null @@ -1,1081 +0,0 @@ -package main - -import ( - "context" - "database/sql" - "fmt" - "math" - "path" - "path/filepath" - "strings" - "time" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - amt4 "github.com/filecoin-project/go-amt-ipld/v4" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/go-state-types/exitcode" - 
- lapi "github.com/filecoin-project/lotus/api" - bstore "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/types/ethtypes" - lcli "github.com/filecoin-project/lotus/cli" -) - -const ( - // same as in chain/events/index.go - eventExists = `SELECT MAX(id) FROM event WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?` - eventCount = `SELECT COUNT(*) FROM event WHERE tipset_key_cid=?` - entryCount = `SELECT COUNT(*) FROM event_entry JOIN event ON event_entry.event_id=event.id WHERE event.tipset_key_cid=?` - insertEvent = `INSERT OR IGNORE INTO event(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)` - insertEntry = `INSERT OR IGNORE INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)` - upsertEventsSeen = `INSERT INTO events_seen(height, tipset_key_cid, reverted) VALUES(?, ?, false) ON CONFLICT(height, tipset_key_cid) DO UPDATE SET reverted=false` - tipsetSeen = `SELECT height,reverted FROM events_seen WHERE tipset_key_cid=?` - - // these queries are used to extract just the information used to reconstruct the event AMT from the database - selectEventIdAndEmitter = `SELECT id, emitter_addr FROM event WHERE tipset_key_cid=? and message_cid=? ORDER BY event_index ASC` - selectEventEntries = `SELECT flags, key, codec, value FROM event_entry WHERE event_id=? ORDER BY _rowid_ ASC` -) - -func withCategory(cat string, cmd *cli.Command) *cli.Command { - cmd.Category = strings.ToUpper(cat) - return cmd -} - -var indexesCmd = &cli.Command{ - Name: "indexes", - Usage: "Commands related to managing sqlite indexes", - HideHelpCommand: true, - Subcommands: []*cli.Command{ - withCategory("msgindex", backfillMsgIndexCmd), - withCategory("msgindex", pruneMsgIndexCmd), - withCategory("txhash", backfillTxHashCmd), - withCategory("events", backfillEventsCmd), - withCategory("events", inspectEventsCmd), - }, -} - -var backfillEventsCmd = &cli.Command{ - Name: "backfill-events", - Usage: "Backfill the events.db for a number of epochs starting from a specified height and working backward", - Flags: []cli.Flag{ - &cli.UintFlag{ - Name: "from", - Value: 0, - Usage: "the tipset height to start backfilling from (0 is head of chain)", - }, - &cli.IntFlag{ - Name: "epochs", - Value: 2000, - Usage: "the number of epochs to backfill (working backwards)", - }, - &cli.BoolFlag{ - Name: "temporary-index", - Value: false, - Usage: "use a temporary index to speed up the backfill process", - }, - &cli.BoolFlag{ - Name: "vacuum", - Value: false, - Usage: "run VACUUM on the database after backfilling is complete; this will reclaim space from deleted rows, but may take a long time", - }, - }, - Action: func(cctx *cli.Context) error { - srv, err := lcli.GetFullNodeServices(cctx) - if err != nil { - return err - } - defer srv.Close() //nolint:errcheck - - api := srv.FullNodeAPI() - ctx := lcli.ReqContext(cctx) - - // currTs will be the tipset where we start backfilling from - currTs, err := api.ChainHead(ctx) - if err != nil { - return err - } - if cctx.IsSet("from") { - // we need to fetch the tipset after the epoch being specified since we will need to advance currTs - currTs, err = api.ChainGetTipSetAfterHeight(ctx, abi.ChainEpoch(cctx.Int("from")+1), currTs.Key()) - if err != nil { - return err - } - } - - // advance currTs by one epoch and maintain 
prevTs as the previous tipset (this allows us to easily use the ChainGetParentMessages/Receipt API) - prevTs := currTs - currTs, err = api.ChainGetTipSet(ctx, currTs.Parents()) - if err != nil { - return fmt.Errorf("failed to load tipset %s: %w", prevTs.Parents(), err) - } - - epochs := cctx.Int("epochs") - - basePath, err := homedir.Expand(cctx.String("repo")) - if err != nil { - return err - } - - log.Infof( - "WARNING: If this command is run against a node that is currently collecting events with DisableHistoricFilterAPI=false, " + - "it may cause the node to fail to record recent events due to the need to obtain an exclusive lock on the database for writes.") - - dbPath := path.Join(basePath, "sqlite", "events.db") - db, err := sql.Open("sqlite3", dbPath+"?_txlock=immediate") - if err != nil { - return err - } - - defer func() { - err := db.Close() - if err != nil { - fmt.Printf("ERROR: closing db: %s", err) - } - }() - - if cctx.Bool("temporary-index") { - log.Info("creating temporary index (tmp_event_backfill_index) on event table to speed up backfill") - _, err := db.Exec("CREATE INDEX IF NOT EXISTS tmp_event_backfill_index ON event (height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted);") - if err != nil { - return err - } - } - - addressLookups := make(map[abi.ActorID]address.Address) - - // TODO: We don't need this address resolution anymore once https://github.com/filecoin-project/lotus/issues/11594 lands - resolveFn := func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { - // we only want to match using f4 addresses - idAddr, err := address.NewIDAddress(uint64(emitter)) - if err != nil { - return address.Undef, false - } - - actor, err := api.StateGetActor(ctx, idAddr, ts.Key()) - if err != nil || actor.DelegatedAddress == nil { - return idAddr, true - } - - return *actor.DelegatedAddress, true - } - - isIndexedValue := func(b uint8) bool { - // currently we mark the full entry as indexed if either the key - // or the value are indexed; in the future we will need finer-grained - // management of indices - return b&(types.EventFlagIndexedKey|types.EventFlagIndexedValue) > 0 - } - - var totalEventsAffected int64 - var totalEntriesAffected int64 - - stmtEventExists, err := db.Prepare(eventExists) - if err != nil { - return err - } - stmtInsertEvent, err := db.Prepare(insertEvent) - if err != nil { - return err - } - stmtInsertEntry, err := db.Prepare(insertEntry) - if err != nil { - return err - } - - stmtUpsertEventSeen, err := db.Prepare(upsertEventsSeen) - if err != nil { - return err - } - - processHeight := func(ctx context.Context, cnt int, msgs []lapi.Message, receipts []*types.MessageReceipt) error { - var tx *sql.Tx - for { - var err error - tx, err = db.BeginTx(ctx, nil) - if err != nil { - if err.Error() == "database is locked" { - log.Warnf("database is locked, retrying in 200ms") - time.Sleep(200 * time.Millisecond) - continue - } - return err - } - break - } - defer tx.Rollback() //nolint:errcheck - - var eventsAffected int64 - var entriesAffected int64 - - tsKeyCid, err := currTs.Key().Cid() - if err != nil { - return fmt.Errorf("failed to get tipset key cid: %w", err) - } - - eventCount := 0 - // loop over each message receipt and backfill the events - for idx, receipt := range receipts { - msg := msgs[idx] - - if receipt.ExitCode != exitcode.Ok { - continue - } - - if receipt.EventsRoot == nil { - continue - } - - events, err := api.ChainGetEvents(ctx, *receipt.EventsRoot) - if 
err != nil { - return fmt.Errorf("failed to load events for tipset %s: %w", currTs, err) - } - - for _, event := range events { - addr, found := addressLookups[event.Emitter] - if !found { - var ok bool - addr, ok = resolveFn(ctx, event.Emitter, currTs) - if !ok { - // not an address we will be able to match against - continue - } - addressLookups[event.Emitter] = addr - } - - // select the highest event id that exists in database, or null if none exists - var entryID sql.NullInt64 - err = tx.Stmt(stmtEventExists).QueryRow( - currTs.Height(), - currTs.Key().Bytes(), - tsKeyCid.Bytes(), - addr.Bytes(), - eventCount, - msg.Cid.Bytes(), - idx, - ).Scan(&entryID) - if err != nil { - return fmt.Errorf("error checking if event exists: %w", err) - } - - // we already have this event - if entryID.Valid { - continue - } - - // event does not exist, lets backfill it - res, err := tx.Stmt(stmtInsertEvent).Exec( - currTs.Height(), // height - currTs.Key().Bytes(), // tipset_key - tsKeyCid.Bytes(), // tipset_key_cid - addr.Bytes(), // emitter_addr - eventCount, // event_index - msg.Cid.Bytes(), // message_cid - idx, // message_index - false, // reverted - ) - if err != nil { - return fmt.Errorf("error inserting event: %w", err) - } - - entryID.Int64, err = res.LastInsertId() - if err != nil { - return fmt.Errorf("could not get last insert id: %w", err) - } - - rowsAffected, err := res.RowsAffected() - if err != nil { - return fmt.Errorf("could not get rows affected: %w", err) - } - eventsAffected += rowsAffected - - // backfill the event entries - for _, entry := range event.Entries { - _, err := tx.Stmt(stmtInsertEntry).Exec( - entryID.Int64, // event_id - isIndexedValue(entry.Flags), // indexed - []byte{entry.Flags}, // flags - entry.Key, // key - entry.Codec, // codec - entry.Value, // value - ) - if err != nil { - return fmt.Errorf("error inserting entry: %w", err) - } - - rowsAffected, err := res.RowsAffected() - if err != nil { - return fmt.Errorf("could not get rows affected: %w", err) - } - entriesAffected += rowsAffected - } - eventCount++ - } - } - - // mark the tipset as processed - _, err = tx.Stmt(stmtUpsertEventSeen).Exec( - currTs.Height(), - tsKeyCid.Bytes(), - ) - if err != nil { - return xerrors.Errorf("exec upsert events seen: %w", err) - } - - err = tx.Commit() - if err != nil { - return fmt.Errorf("failed to commit transaction: %w", err) - } - - log.Infof("[%d] backfilling actor events epoch:%d, eventsAffected:%d, entriesAffected:%d", cnt, currTs.Height(), eventsAffected, entriesAffected) - - totalEventsAffected += eventsAffected - totalEntriesAffected += entriesAffected - - return nil - } - - for i := 0; i < epochs; i++ { - select { - case <-ctx.Done(): - return nil - default: - } - - blockCid := prevTs.Blocks()[0].Cid() - - // get messages for the parent of the previous tipset (which will be currTs) - msgs, err := api.ChainGetParentMessages(ctx, blockCid) - if err != nil { - return fmt.Errorf("failed to get parent messages for block %s: %w", blockCid, err) - } - - // get receipts for the parent of the previous tipset (which will be currTs) - receipts, err := api.ChainGetParentReceipts(ctx, blockCid) - if err != nil { - return fmt.Errorf("failed to get parent receipts for block %s: %w", blockCid, err) - } - - if len(msgs) != len(receipts) { - return fmt.Errorf("mismatched in message and receipt count: %d != %d", len(msgs), len(receipts)) - } - - err = processHeight(ctx, i, msgs, receipts) - if err != nil { - return err - } - - // advance prevTs and currTs up the chain - prevTs = 
currTs - currTs, err = api.ChainGetTipSet(ctx, currTs.Parents()) - if err != nil { - return fmt.Errorf("failed to load tipset %s: %w", currTs, err) - } - } - - log.Infof("backfilling events complete, totalEventsAffected:%d, totalEntriesAffected:%d", totalEventsAffected, totalEntriesAffected) - - if cctx.Bool("temporary-index") { - log.Info("dropping temporary index (tmp_event_backfill_index) on event table") - _, err := db.Exec("DROP INDEX IF EXISTS tmp_event_backfill_index;") - if err != nil { - fmt.Printf("ERROR: dropping index: %s", err) - } - } - - if cctx.Bool("vacuum") { - log.Info("running VACUUM on the database") - _, err := db.Exec("VACUUM;") - if err != nil { - return fmt.Errorf("failed to run VACUUM on the database: %w", err) - } - } - - return nil - }, -} - -var inspectEventsCmd = &cli.Command{ - Name: "inspect-events", - Usage: "Perform a health-check on the events.db for a number of epochs starting from a specified height and working backward. " + - "Logs tipsets with problems and optionally logs tipsets without problems. Without specifying a fixed number of epochs, " + - "the command will continue until it reaches a tipset that is not in the blockstore.", - Flags: []cli.Flag{ - &cli.UintFlag{ - Name: "from", - Value: 0, - Usage: "the tipset height to start inspecting from (0 is head of chain)", - }, - &cli.IntFlag{ - Name: "epochs", - Value: 0, - Usage: "the number of epochs to inspect (working backwards) [0 = until we reach a block we don't have]", - }, - &cli.BoolFlag{ - Name: "log-good", - Usage: "log tipsets that have no detected problems", - Value: false, - }, - }, - Action: func(cctx *cli.Context) error { - srv, err := lcli.GetFullNodeServices(cctx) - if err != nil { - return err - } - defer srv.Close() //nolint:errcheck - - api := srv.FullNodeAPI() - ctx := lcli.ReqContext(cctx) - - // currTs will be the tipset where we start backfilling from - currTs, err := api.ChainHead(ctx) - if err != nil { - return err - } - if cctx.IsSet("from") { - // we need to fetch the tipset after the epoch being specified since we will need to advance currTs - currTs, err = api.ChainGetTipSetAfterHeight(ctx, abi.ChainEpoch(cctx.Int("from")+1), currTs.Key()) - if err != nil { - return err - } - } - - logGood := cctx.Bool("log-good") - - // advance currTs by one epoch and maintain prevTs as the previous tipset (this allows us to easily use the ChainGetParentMessages/Receipt API) - prevTs := currTs - currTs, err = api.ChainGetTipSet(ctx, currTs.Parents()) - if err != nil { - return fmt.Errorf("failed to load tipset %s: %w", prevTs.Parents(), err) - } - - epochs := cctx.Int("epochs") - if epochs <= 0 { - epochs = math.MaxInt32 - } - - basePath, err := homedir.Expand(cctx.String("repo")) - if err != nil { - return err - } - - dbPath := path.Join(basePath, "sqlite", "events.db") - db, err := sql.Open("sqlite3", dbPath+"?mode=ro") - if err != nil { - return err - } - - defer func() { - err := db.Close() - if err != nil { - fmt.Printf("ERROR: closing db: %s", err) - } - }() - - stmtEventCount, err := db.Prepare(eventCount) - if err != nil { - return err - } - stmtEntryCount, err := db.Prepare(entryCount) - if err != nil { - return err - } - stmtTipsetSeen, err := db.Prepare(tipsetSeen) - if err != nil { - return err - } - stmtSelectEventIdAndEmitter, err := db.Prepare(selectEventIdAndEmitter) - if err != nil { - return err - } - stmtSelectEventEntries, err := db.Prepare(selectEventEntries) - if err != nil { - return err - } - - processHeight := func(ctx context.Context, messages []lapi.Message, 
receipts []*types.MessageReceipt) error { - tsKeyCid, err := currTs.Key().Cid() - if err != nil { - return xerrors.Errorf("failed to get tipset key cid: %w", err) - } - - var problems []string - - checkEventAndEntryCounts := func() error { - // compare by counting events, using ChainGetEvents to load the events from the chain - expectEvents, expectEntries, err := chainEventAndEntryCountsAt(ctx, currTs, receipts, api) - if err != nil { - return err - } - - actualEvents, actualEntries, err := dbEventAndEntryCountsAt(currTs, stmtEventCount, stmtEntryCount) - if err != nil { - return err - } - - if actualEvents != expectEvents { - problems = append(problems, fmt.Sprintf("expected %d events, got %d", expectEvents, actualEvents)) - } - if actualEntries != expectEntries { - problems = append(problems, fmt.Sprintf("expected %d entries, got %d", expectEntries, actualEntries)) - } - - return nil - } - - // Compare the AMT roots: we reconstruct the event AMT from the database data we have and - // compare it with the on-chain AMT root from the receipt. If it's the same CID then we have - // exactly the same event data. Any variation, in number of events, and even a single byte - // in event data, will be considered a mismatch. - - // cache for address -> actorID because it's typical for tipsets to generate many events for - // the same actors so we can try and avoid too many StateLookupID calls - addrIdCache := make(map[address.Address]abi.ActorID) - - eventIndex := 0 - var hasEvents bool - for msgIndex, receipt := range receipts { - if receipt.EventsRoot == nil { - continue - } - - amtRoot, has, problem, err := amtRootForEvents( - ctx, - api, - tsKeyCid, - prevTs.Key(), - stmtSelectEventIdAndEmitter, - stmtSelectEventEntries, - messages[msgIndex], - addrIdCache, - ) - if err != nil { - return err - } - if has && !hasEvents { - hasEvents = true - } - - if problem != "" { - problems = append(problems, problem) - } else if amtRoot != *receipt.EventsRoot { - problems = append(problems, fmt.Sprintf("events root mismatch for message %s", messages[msgIndex].Cid)) - // also provide more information about the mismatch - if err := checkEventAndEntryCounts(); err != nil { - return err - } - } - - eventIndex++ - } - - var seenHeight int - var seenReverted int - if err := stmtTipsetSeen.QueryRow(tsKeyCid.Bytes()).Scan(&seenHeight, &seenReverted); err != nil { - if err == sql.ErrNoRows { - if hasEvents { - problems = append(problems, "not in events_seen table") - } else { - problems = append(problems, "zero-event epoch not in events_seen table") - } - } else { - return xerrors.Errorf("failed to check if tipset is seen: %w", err) - } - } else { - if seenHeight != int(currTs.Height()) { - problems = append(problems, fmt.Sprintf("events_seen height mismatch (%d)", seenHeight)) - } - if seenReverted != 0 { - problems = append(problems, "events_seen marked as reverted") - } - } - - if len(problems) > 0 { - _, _ = fmt.Fprintf(cctx.App.Writer, "✗ Epoch %d (%s): %s\n", currTs.Height(), tsKeyCid, strings.Join(problems, ", ")) - } else if logGood { - _, _ = fmt.Fprintf(cctx.App.Writer, "✓ Epoch %d (%s)\n", currTs.Height(), tsKeyCid) - } - - return nil - } - - for i := 0; ctx.Err() == nil && i < epochs; i++ { - // get receipts and messages for the parent of the previous tipset (which will be currTs) - - blockCid := prevTs.Blocks()[0].Cid() - - messages, err := api.ChainGetParentMessages(ctx, blockCid) - if err != nil { - _, _ = fmt.Fprintf(cctx.App.ErrWriter, "Missing parent messages for epoch %d (checked %d epochs)", 
prevTs.Height(), i) - break - } - receipts, err := api.ChainGetParentReceipts(ctx, blockCid) - if err != nil { - _, _ = fmt.Fprintf(cctx.App.ErrWriter, "Missing parent receipts for epoch %d (checked %d epochs)", prevTs.Height(), i) - break - } - - if len(messages) != len(receipts) { - return fmt.Errorf("mismatched in message and receipt count: %d != %d", len(messages), len(receipts)) - } - - err = processHeight(ctx, messages, receipts) - if err != nil { - return err - } - - // advance prevTs and currTs up the chain - prevTs = currTs - currTs, err = api.ChainGetTipSet(ctx, currTs.Parents()) - if err != nil { - return xerrors.Errorf("failed to load tipset %s: %w", currTs, err) - } - } - - return nil - }, -} - -// amtRootForEvents generates the events AMT root CID for a given message's events, and returns -// whether the message has events, a string describing any non-fatal problem encountered, -// and a fatal error if one occurred. -func amtRootForEvents( - ctx context.Context, - api lapi.FullNode, - tsKeyCid cid.Cid, - prevTsKey types.TipSetKey, - stmtSelectEventIdAndEmitter, stmtSelectEventEntries *sql.Stmt, - message lapi.Message, - addrIdCache map[address.Address]abi.ActorID, -) (cid.Cid, bool, string, error) { - - events := make([]cbg.CBORMarshaler, 0) - - rows, err := stmtSelectEventIdAndEmitter.Query(tsKeyCid.Bytes(), message.Cid.Bytes()) - if err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to query events: %w", err) - } - defer func() { - _ = rows.Close() - }() - - for rows.Next() { - var eventId int - var emitterAddr []byte - if err := rows.Scan(&eventId, &emitterAddr); err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to scan row: %w", err) - } - - addr, err := address.NewFromBytes(emitterAddr) - if err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to parse address: %w", err) - } - var actorId abi.ActorID - if id, ok := addrIdCache[addr]; ok { - actorId = id - } else { - if addr.Protocol() != address.ID { - // use the previous tipset (height+1) to do an address lookup because the actor - // may have been created in the current tipset (i.e. deferred execution means the - // changed state isn't available until the next epoch) - idAddr, err := api.StateLookupID(ctx, addr, prevTsKey) - if err != nil { - // TODO: fix this? 
we should be able to resolve all addresses - return cid.Undef, false, fmt.Sprintf("failed to resolve address (%s), could not compare amt", addr.String()), nil - } - addr = idAddr - } - id, err := address.IDFromAddress(addr) - if err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to get ID from address: %w", err) - } - actorId = abi.ActorID(id) - addrIdCache[addr] = actorId - } - - event := types.Event{ - Emitter: actorId, - Entries: make([]types.EventEntry, 0), - } - - rows2, err := stmtSelectEventEntries.Query(eventId) - if err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to query event entries: %w", err) - } - defer func() { - _ = rows2.Close() - }() - - for rows2.Next() { - var flags []byte - var key string - var codec uint64 - var value []byte - if err := rows2.Scan(&flags, &key, &codec, &value); err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to scan row: %w", err) - } - entry := types.EventEntry{ - Flags: flags[0], - Key: key, - Codec: codec, - Value: value, - } - event.Entries = append(event.Entries, entry) - } - - events = append(events, &event) - } - - // construct the AMT from our slice to an in-memory IPLD store just so we can get the root, - // we don't need the blocks themselves - root, err := amt4.FromArray(ctx, cbor.NewCborStore(bstore.NewMemory()), events, amt4.UseTreeBitWidth(types.EventAMTBitwidth)) - if err != nil { - return cid.Undef, false, "", xerrors.Errorf("failed to create AMT: %w", err) - } - return root, len(events) > 0, "", nil -} - -func chainEventAndEntryCountsAt(ctx context.Context, ts *types.TipSet, receipts []*types.MessageReceipt, api lapi.FullNode) (int, int, error) { - var expectEvents int - var expectEntries int - for _, receipt := range receipts { - if receipt.ExitCode != exitcode.Ok || receipt.EventsRoot == nil { - continue - } - events, err := api.ChainGetEvents(ctx, *receipt.EventsRoot) - if err != nil { - return 0, 0, xerrors.Errorf("failed to load events for tipset %s: %w", ts, err) - } - expectEvents += len(events) - for _, event := range events { - expectEntries += len(event.Entries) - } - } - return expectEvents, expectEntries, nil -} - -func dbEventAndEntryCountsAt(ts *types.TipSet, stmtEventCount, stmtEntryCount *sql.Stmt) (int, int, error) { - tsKeyCid, err := ts.Key().Cid() - if err != nil { - return 0, 0, xerrors.Errorf("failed to get tipset key cid: %w", err) - } - var actualEvents int - if err := stmtEventCount.QueryRow(tsKeyCid.Bytes()).Scan(&actualEvents); err != nil { - return 0, 0, xerrors.Errorf("failed to count events for epoch %d (tsk CID %s): %w", ts.Height(), tsKeyCid, err) - } - var actualEntries int - if err := stmtEntryCount.QueryRow(tsKeyCid.Bytes()).Scan(&actualEntries); err != nil { - return 0, 0, xerrors.Errorf("failed to count entries for epoch %d (tsk CID %s): %w", ts.Height(), tsKeyCid, err) - } - return actualEvents, actualEntries, nil -} - -var backfillMsgIndexCmd = &cli.Command{ - Name: "backfill-msgindex", - Usage: "Backfill the msgindex.db for a number of epochs starting from a specified height", - Flags: []cli.Flag{ - &cli.IntFlag{ - Name: "from", - Value: 0, - Usage: "height to start the backfill; uses the current head if omitted", - }, - &cli.IntFlag{ - Name: "epochs", - Value: 1800, - Usage: "number of epochs to backfill; defaults to 1800 (2 finalities)", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - - defer closer() - ctx := lcli.ReqContext(cctx) - - curTs, err := 
api.ChainHead(ctx) - if err != nil { - return err - } - - startHeight := int64(cctx.Int("from")) - if startHeight == 0 { - startHeight = int64(curTs.Height()) - 1 - } - epochs := cctx.Int("epochs") - - basePath, err := homedir.Expand(cctx.String("repo")) - if err != nil { - return err - } - - dbPath := path.Join(basePath, "sqlite", "msgindex.db") - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - return err - } - - defer func() { - err := db.Close() - if err != nil { - fmt.Printf("ERROR: closing db: %s", err) - } - }() - - insertStmt, err := db.Prepare("INSERT OR IGNORE INTO messages (cid, tipset_cid, epoch) VALUES (?, ?, ?)") - if err != nil { - return err - } - - var nrRowsAffected int64 - for i := 0; i < epochs; i++ { - epoch := abi.ChainEpoch(startHeight - int64(i)) - - if i%100 == 0 { - log.Infof("%d/%d processing epoch:%d, nrRowsAffected:%d", i, epochs, epoch, nrRowsAffected) - } - - ts, err := api.ChainGetTipSetByHeight(ctx, epoch, curTs.Key()) - if err != nil { - return fmt.Errorf("failed to get tipset at epoch %d: %w", epoch, err) - } - - tsCid, err := ts.Key().Cid() - if err != nil { - return fmt.Errorf("failed to get tipset cid at epoch %d: %w", epoch, err) - } - - msgs, err := api.ChainGetMessagesInTipset(ctx, ts.Key()) - if err != nil { - return fmt.Errorf("failed to get messages in tipset at epoch %d: %w", epoch, err) - } - - for _, msg := range msgs { - key := msg.Cid.String() - tskey := tsCid.String() - res, err := insertStmt.Exec(key, tskey, int64(epoch)) - if err != nil { - return fmt.Errorf("failed to insert message cid %s in tipset %s at epoch %d: %w", key, tskey, epoch, err) - } - rowsAffected, err := res.RowsAffected() - if err != nil { - return fmt.Errorf("failed to get rows affected for message cid %s in tipset %s at epoch %d: %w", key, tskey, epoch, err) - } - nrRowsAffected += rowsAffected - } - } - - log.Infof("Done backfilling, nrRowsAffected:%d", nrRowsAffected) - - return nil - }, -} - -var pruneMsgIndexCmd = &cli.Command{ - Name: "prune-msgindex", - Usage: "Prune the msgindex.db for messages included before a given epoch", - Flags: []cli.Flag{ - &cli.IntFlag{ - Name: "from", - Usage: "height to start the prune; if negative it indicates epochs from current head", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - - defer closer() - ctx := lcli.ReqContext(cctx) - - startHeight := int64(cctx.Int("from")) - if startHeight < 0 { - curTs, err := api.ChainHead(ctx) - if err != nil { - return err - } - - startHeight += int64(curTs.Height()) - - if startHeight < 0 { - return xerrors.Errorf("bogus start height %d", startHeight) - } - } - - basePath, err := homedir.Expand(cctx.String("repo")) - if err != nil { - return err - } - - dbPath := path.Join(basePath, "sqlite", "msgindex.db") - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - return err - } - - defer func() { - err := db.Close() - if err != nil { - fmt.Printf("ERROR: closing db: %s", err) - } - }() - - tx, err := db.Begin() - if err != nil { - return err - } - - if _, err := tx.Exec("DELETE FROM messages WHERE epoch < ?", startHeight); err != nil { - if err := tx.Rollback(); err != nil { - fmt.Printf("ERROR: rollback: %s", err) - } - return err - } - - if err := tx.Commit(); err != nil { - return err - } - - return nil - }, -} - -var backfillTxHashCmd = &cli.Command{ - Name: "backfill-txhash", - Usage: "Backfills the txhash.db for a number of epochs starting from a specified height", - Flags: []cli.Flag{ - 
&cli.UintFlag{ - Name: "from", - Value: 0, - Usage: "the tipset height to start backfilling from (0 is head of chain)", - }, - &cli.IntFlag{ - Name: "epochs", - Value: 2000, - Usage: "the number of epochs to backfill", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := lcli.GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := lcli.ReqContext(cctx) - - curTs, err := api.ChainHead(ctx) - if err != nil { - return err - } - - startHeight := int64(cctx.Int("from")) - if startHeight == 0 { - startHeight = int64(curTs.Height()) - 1 - } - - epochs := cctx.Int("epochs") - - basePath, err := homedir.Expand(cctx.String("repo")) - if err != nil { - return err - } - - dbPath := filepath.Join(basePath, "sqlite", "txhash.db") - db, err := sql.Open("sqlite3", dbPath) - if err != nil { - return err - } - - defer func() { - err := db.Close() - if err != nil { - fmt.Printf("ERROR: closing db: %s", err) - } - }() - - insertStmt, err := db.Prepare("INSERT OR IGNORE INTO eth_tx_hashes(hash, cid) VALUES(?, ?)") - if err != nil { - return err - } - - var totalRowsAffected int64 = 0 - for i := 0; i < epochs; i++ { - epoch := abi.ChainEpoch(startHeight - int64(i)) - - select { - case <-cctx.Done(): - fmt.Println("request cancelled") - return nil - default: - } - - curTsk := curTs.Parents() - execTs, err := api.ChainGetTipSet(ctx, curTsk) - if err != nil { - return fmt.Errorf("failed to call ChainGetTipSet for %s: %w", curTsk, err) - } - - if i%100 == 0 { - log.Infof("%d/%d processing epoch:%d", i, epochs, epoch) - } - - for _, blockheader := range execTs.Blocks() { - blkMsgs, err := api.ChainGetBlockMessages(ctx, blockheader.Cid()) - if err != nil { - log.Infof("Could not get block messages at epoch: %d, stopping walking up the chain", epoch) - epochs = i - break - } - - for _, smsg := range blkMsgs.SecpkMessages { - if smsg.Signature.Type != crypto.SigTypeDelegated { - continue - } - - tx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(smsg) - if err != nil { - return fmt.Errorf("failed to convert from signed message: %w at epoch: %d", err, epoch) - } - - hash, err := tx.TxHash() - if err != nil { - return fmt.Errorf("failed to calculate hash for ethTx: %w at epoch: %d", err, epoch) - } - - res, err := insertStmt.Exec(hash.String(), smsg.Cid().String()) - if err != nil { - return fmt.Errorf("error inserting tx mapping to db: %s at epoch: %d", err, epoch) - } - - rowsAffected, err := res.RowsAffected() - if err != nil { - return fmt.Errorf("error getting rows affected: %s at epoch: %d", err, epoch) - } - - if rowsAffected > 0 { - log.Debugf("Inserted txhash %s, cid: %s at epoch: %d", hash.String(), smsg.Cid().String(), epoch) - } - - totalRowsAffected += rowsAffected - } - } - - curTs = execTs - } - - log.Infof("Done, inserted %d missing txhashes", totalRowsAffected) - - return nil - }, -} diff --git a/cmd/lotus-shed/invariants.go b/cmd/lotus-shed/invariants.go index 91db29e613b..e8a6e734af9 100644 --- a/cmd/lotus-shed/invariants.go +++ b/cmd/lotus-shed/invariants.go @@ -30,7 +30,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -128,7 +127,7 @@ var invariantsCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, 
filcns.Weight, nil) defer cs.Close() //nolint:errcheck - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return err } diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index 911da346e97..250ea129cc5 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -86,7 +86,7 @@ func main() { invariantsCmd, gasTraceCmd, replayOfflineCmd, - indexesCmd, + chainIndexCmds, FevmAnalyticsCmd, mismatchesCmd, blockCmd, diff --git a/cmd/lotus-shed/migrations.go b/cmd/lotus-shed/migrations.go index 11f09c4d62c..10b7d073d5f 100644 --- a/cmd/lotus-shed/migrations.go +++ b/cmd/lotus-shed/migrations.go @@ -62,7 +62,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" @@ -178,7 +177,8 @@ var migrationsCmd = &cli.Command{ defer cs.Close() //nolint:errcheck // Note: we use a map datastore for the metadata to avoid writing / using cached migration results in the metadata store - sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, datastore.NewMapDatastore(), index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, + datastore.NewMapDatastore(), nil) if err != nil { return err } diff --git a/cmd/lotus-shed/state-stats.go b/cmd/lotus-shed/state-stats.go index cf865f20194..e5145e5c178 100644 --- a/cmd/lotus-shed/state-stats.go +++ b/cmd/lotus-shed/state-stats.go @@ -33,7 +33,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -259,7 +258,7 @@ func loadChainStore(ctx context.Context, repoPath string) (*StoreHandle, error) } tsExec := consensus.NewTipSetExecutor(filcns.RewardFunc) - sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, tsExec, vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil, mds, nil) if err != nil { return nil, fmt.Errorf("failed to open state manager: %w", err) } diff --git a/cmd/lotus-sim/simulation/node.go b/cmd/lotus-sim/simulation/node.go index fd9c0284614..cda3e69d839 100644 --- a/cmd/lotus-sim/simulation/node.go +++ b/cmd/lotus-sim/simulation/node.go @@ -12,7 +12,6 @@ import ( "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" 
"github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" @@ -107,7 +106,8 @@ func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) { if err != nil { return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err) } - sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, nil, nd.MetadataDS, index.DummyMsgIndex) + sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), us, + nil, nd.MetadataDS, nil) if err != nil { return nil, xerrors.Errorf("failed to create state manager for simulation %s: %w", name, err) } @@ -126,7 +126,8 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) if err != nil { return nil, err } - sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(nd.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil, nd.MetadataDS, nil) if err != nil { return nil, xerrors.Errorf("creating state manager: %w", err) } diff --git a/cmd/lotus-sim/simulation/simulation.go b/cmd/lotus-sim/simulation/simulation.go index d73a033cf96..9e85c7d6260 100644 --- a/cmd/lotus-sim/simulation/simulation.go +++ b/cmd/lotus-sim/simulation/simulation.go @@ -17,7 +17,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" @@ -202,7 +201,8 @@ func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch if err != nil { return err } - sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(sim.Node.Chainstore, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil, sim.Node.MetadataDS, nil) if err != nil { return err } diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index fa9be241c2e..b7fbd63e695 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -612,7 +612,8 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return xerrors.Errorf("failed to construct beacon schedule: %w", err) } - stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, index.DummyMsgIndex) + stm, err := stmgr.NewStateManager(cst, consensus.NewTipSetExecutor(filcns.RewardFunc), + vm.Syscalls(proofsffi.ProofVerifier), filcns.DefaultUpgradeSchedule(), shd, mds, nil) if err != nil { return err } @@ -628,8 +629,6 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return err } - // populate the message index if user has EnableMsgIndex enabled - // c, err := lr.Config() if err != nil { return err @@ -638,17 +637,23 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, 
snapshot bool) if !ok { return xerrors.Errorf("invalid config for repo, got: %T", c) } - if cfg.Index.EnableMsgIndex { - log.Info("populating message index...") - basePath, err := lr.SqlitePath() - if err != nil { - return err - } - if err := index.PopulateAfterSnapshot(ctx, filepath.Join(basePath, index.DefaultDbFilename), cst); err != nil { - return err - } - log.Info("populating message index done") + + if !cfg.ChainIndexer.EnableIndexer { + log.Info("chain indexer is disabled, not populating index from snapshot") + return nil + } + + // populate the chain Index from the snapshot + basePath, err := lr.ChainIndexPath() + if err != nil { + return err + } + + log.Info("populating chain index from snapshot...") + if err := index.PopulateFromSnapshot(ctx, filepath.Join(basePath, index.DefaultDbFilename), cst); err != nil { + return err } + log.Info("populating chain index from snapshot done") return nil } diff --git a/conformance/driver.go b/conformance/driver.go index f0dd5cc2a4c..15ae567063a 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -22,7 +22,6 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" - "github.com/filecoin-project/lotus/chain/index" proofsffi "github.com/filecoin-project/lotus/chain/proofs/ffi" "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/lotus/chain/state" @@ -110,7 +109,7 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params cs = store.NewChainStore(bs, bs, ds, filcns.Weight, nil) tse = consensus.NewTipSetExecutor(filcns.RewardFunc) - sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds, index.DummyMsgIndex) + sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil, ds, nil) ) if err != nil { return nil, err diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index 60c0479b171..655c1034ecc 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -37,6 +37,7 @@ * [ChainSetHead](#ChainSetHead) * [ChainStatObj](#ChainStatObj) * [ChainTipSetWeight](#ChainTipSetWeight) + * [ChainValidateIndex](#ChainValidateIndex) * [Create](#Create) * [CreateBackup](#CreateBackup) * [Eth](#Eth) @@ -1241,6 +1242,65 @@ Inputs: Response: `"0"` +### ChainValidateIndex +ChainValidateIndex validates the integrity of and optionally backfills +the chain index at a specific epoch. + +It can be used to: + +1. Validate the chain index at a specific epoch: + - Ensures consistency between indexed data and actual chain state + - Reports any errors found during validation (i.e. the indexed data does not match the actual chain state, missing data, etc.) + +2. Optionally backfill missing data: + - Backfills data if the index is missing information for the specified epoch + - Backfilling only occurs when the `backfill` parameter is set to `true` + +3. Detect "holes" in the index: + - If `backfill` is `false` and the index lacks data for the specified epoch, the API returns an error indicating missing data + +Parameters: + - epoch: The specific chain epoch for which to validate/backfill the index. + - backfill: A boolean flag indicating whether to attempt backfilling of missing data if the index does not have data for the + specified epoch. 
+ +Returns: + - *types.IndexValidation: A pointer to an IndexValidation struct containing the results of the validation/backfill. + - error: An error object if the validation/backfill fails. The error message will contain details about the index + corruption if the call fails because of an inconsistency between indexed data and the actual chain state. + Note: The API returns an error if the index does not have data for the specified epoch and backfill is set to false. + + +Perms: write + +Inputs: +```json +[ + 10101, + true +] +``` + +Response: +```json +{ + "TipSetKey": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve" + } + ], + "Height": 10101, + "IndexedMessagesCount": 42, + "IndexedEventsCount": 42, + "IndexedEventEntriesCount": 42, + "Backfilled": true, + "IsNullRound": true +} +``` + + ## Create diff --git a/documentation/en/chain-indexer-overview-for-operators.md b/documentation/en/chain-indexer-overview-for-operators.md new file mode 100644 index 00000000000..0e5869c510b --- /dev/null +++ b/documentation/en/chain-indexer-overview-for-operators.md @@ -0,0 +1,418 @@ +# ChainIndexer Documentation for Operators + +- [Introduction](#introduction) +- [ChainIndexer Config](#chainindexer-config) + - [Enablement](#enablement) + - [Garbage Collection](#garbage-collection) + - [Recommendations](#recommendations) + - [Removed Options](#removed-options) +- [Upgrade](#upgrade) + - [Preparation](#preparation) + - [Upgrade when using existing `LOTUS_PATH` chain state](#upgrade-when-using-existing-lotus_path-chain-state) + - [Part 1: Create a backfilled ChainIndexer `chainindex.db`](#part-1-create-a-backfilled-chainindexer-chainindexdb) + - [Part 2: Create a copyable `chainindex.db`](#part-2-create-a-copyable-chainindexdb) + - [Part 3: Update other nodes](#part-3-update-other-nodes) + - [Part 4: Cleanup](#part-4-cleanup) + - [Upgrade when importing chain state from a snapshot](#upgrade-when-importing-chain-state-from-a-snapshot) +- [Backfill](#backfill) + - [Backfill Timing](#backfill-timing) + - [Backfill Disk Space Requirements](#backfill-disk-space-requirements) + - [`lotus-shed chainindex validate-backfill` CLI tool](#lotus-shed-chainindex-validate-backfill-cli-tool) + - [Usage](#usage) +- [Regular Checks](#regular-checks) +- [Downgrade Steps](#downgrade-steps) +- [Terminology](#terminology) + - [Previous Indexing System](#previous-indexing-system) + - [ChainIndexer Indexing System](#chainindexer-indexing-system) +- [Appendix](#appendix) + - [Why isn't there an automated migration from the previous indexing system to the ChainIndexer indexing system?](#why-isnt-there-an-automated-migration-from-the-previous-indexing-system-to-the-chainindexer-indexing-system) + - [`ChainValidateIndex` RPC API](#chainvalidateindex-rpc-api) + +## Introduction + +This document is for externally-available, high-performance RPC providers and for node operators who use or expose the Ethereum and/or events APIs. It walks through the configuration changes, migration flow, and operations/maintenance work needed to enable, backfill, and maintain the [`ChainIndexer`](#chainindexer-indexing-system). The justification for and benefits of the `ChainIndexer` are documented [here](https://github.com/filecoin-project/lotus/issues/12453). + +The ChainIndexer is now also required if you enable: +1. Ethereum (`eth_*`) APIs using the `EnableEthRPC` Lotus configuration option OR +2.
ActorEvent APIs using the `EnableActorEventsAPI` Lotus configuration option + +**Note: If you are a Storage Provider or node operator who does not serve public RPC requests or does not need Ethereum or Event APIs (i.e., if `Fevm.EnableEthRPC = false` and `Events.EnableActorEventsAPI = false`, their default values), you can skip this document as the `ChainIndexer` is already disabled by default**. + +## ChainIndexer Config +### Enablement + +The following must be enabled on a Lotus node before starting, as they are disabled by default: + +```toml +[Fevm] +# Enable the ETH RPC APIs. +# This is not required for ChainIndexer support, but ChainIndexer is required if you enable this. + EnableEthRPC = true + +[Events] +# Enable the Actor Events APIs. +# This is not required for ChainIndexer support, but ChainIndexer is required if you enable this. + EnableActorEventsAPI = true + +[ChainIndexer] +# Enable the ChainIndexer, which is required for the ETH RPC APIs and Actor Events APIs. +# If they are enabled, but the ChainIndexer is not, Lotus will exit during startup. +# (ChainIndexer needs to be explicitly enabled to signal to the node operator the extra +# supporting functionality that will now be running.) + EnableIndexer = true +``` + +You can learn more about these and other configuration options available for the `ChainIndexer` [here](https://github.com/filecoin-project/lotus/blob/master/documentation/en/default-lotus-config.toml). + + +### Garbage Collection + +The `ChainIndexer` includes a garbage collection (GC) mechanism to manage the amount of historical data retained. See the [ChainIndexer size requirements](#backfill-disk-space-requirements). + +By default, GC is disabled to preserve all indexed data. + +To configure GC, use the `GCRetentionEpochs` parameter in the `ChainIndexer` section of your config. + +The ChainIndexer [periodically runs](https://github.com/filecoin-project/lotus/blob/master/chain/index/gc.go#L15) GC if `GCRetentionEpochs` is > 0 and removes indexed data for epochs older than `(current_head_height - GCRetentionEpochs)`. + +```toml +[ChainIndexer] + GCRetentionEpochs = X # Replace X with your desired value +``` + +- Setting `GCRetentionEpochs` to 0 (**default**) disables GC. +- Any positive value enables GC and determines the number of epochs of historical data to retain. + +#### Recommendations + +1. **Archival Nodes**: **Keep GC disabled** (`GCRetentionEpochs` = 0) to retain all indexed data. + +2. **Non-Archival Nodes**: Set `GCRetentionEpochs` to match the amount of chain state your node retains. + +**Example:** if your node is configured to retain Filecoin chain state with a Splitstore Hotstore that approximates 2 days of epochs, set `GCRetentionEpochs` to at least `retentionDays * epochsPerDay = 2 * 2880 = 5760`; a full config sketch follows the note below. + +**Warning:** Setting this value below the chain state retention period may degrade RPC performance and reliability because the ChainIndexer will lack data for epochs still present in the chain state. + +**Note:** `Chainstore.Splitstore` is configured in terms of bytes (not epochs) and `ChainIndexer.GCRetentionEpochs` is in terms of epochs (not bytes). For the purposes of this discussion, we're assuming operators have determined `Chainstore.Splitstore.HotStoreMaxSpaceTarget` and `Chainstore.Splitstore.HotStoreMaxSpaceThreshold` values that approximate a certain number of days of storage in the Splitstore Hotstore. The guidance here is to make sure this approximation exceeds `ChainIndexer.GCRetentionEpochs`.
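+For example, a minimal sketch of the resulting config for a non-archival node whose Splitstore Hotstore approximates two days of chain state (the `5760` value simply applies the `retentionDays * epochsPerDay` formula above; adjust it to your own retention):
+
+```toml
+[ChainIndexer]
+  EnableIndexer = true
+  # 2 days * 2880 epochs/day; keep this at or above your chain state retention
+  GCRetentionEpochs = 5760
+```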
+ +### Removed Options + +**Note: The following config options no longer exist in Lotus and have been removed in favor of the ChainIndexer config options explained above. They can be removed from your configuration file when upgrading to Lotus v1.31.0.** + +```toml +[Fevm] +EthTxHashMappingLifetimeDays + +[Events] +DisableRealTimeFilterAPI +DisableHistoricFilterAPI +DatabasePath + +[Index] +EnableMsgIndex +``` + +The `Fevm.Events` options were marked as deprecated in Lotus 1.26, having been moved to the new top-level `Events` section, and have now been removed with Lotus 1.31. + +* `Fevm.Events.DatabasePath` (no replacement available) +* `Fevm.Events.DisableRealTimeFilterAPI` (no replacement available) +* `Fevm.Events.DisableHistoricFilterAPI` (no replacement available) +* `Fevm.Events.FilterTTL` (use `Events.FilterTTL` instead) +* `Fevm.Events.MaxFilters` (use `Events.MaxFilters` instead) +* `Fevm.Events.MaxFilterResults` (use `Events.MaxFilterResults` instead) +* `Fevm.Events.MaxFilterHeightRange` (use `Events.MaxFilterHeightRange` instead) + +## Upgrade + +### Preparation +One can upgrade/downgrade between [pre-ChainIndexer](#previous-indexing-system) and [with-ChainIndexer](#chainindexer-indexing-system) Lotus versions without conflict because they persist state to different directories and don't rely on each other. No backup is necessary (but extra backups don't hurt). There is still a [backfilling step when downgrading](#downgrade-steps), though. + +These upgrade steps assume one has multiple nodes in their fleet and can afford to have a node not handling traffic, potentially for days per [backfill timing below](#backfill-timing). + +One should also check to ensure they have [sufficient disk space](#backfill-disk-space-requirements). + +### Upgrade when using existing `LOTUS_PATH` chain state +* This upgrade path assumes one has an existing node with existing `LOTUS_PATH` chain state they want to keep using and they don't want to import chain state from a snapshot. A prime example is an existing archival node. +* Perform the [preparation steps](#preparation) before proceeding. +* See [here for the snapshot upgrade path](#upgrade-when-importing-chain-state-from-a-snapshot). + +#### Part 1: Create a backfilled ChainIndexer `chainindex.db` +1. **Route traffic away from an initial node** + - Example: prevent a load balancer from routing traffic to a designated node. +2. **Stop the designated Lotus Node** + - Stop the designated Lotus node before starting the upgrade and backfill process. +3. **Update Configuration** + - Modify the Lotus configuration to enable the `ChainIndexer` as described in the [`ChainIndexer Config` section above](#chainindexer-config). +4. **Restart Lotus Node** + - Restart the Lotus node with the new configuration. + - The `ChainIndexer` will begin indexing **real-time chain state changes** immediately in the `${LOTUS_PATH}/chainindex` directory. + - *However, it will not automatically index any historical chain state (i.e., any previously existing chain state prior to the upgrade).* +5. **Backfill** + - See the ["Backfill" section below](#backfill). + - This could potentially take days per [Backfill Timing](#backfill-timing). +6. **Ensure node health** + - Perform whatever steps are usually done to validate a node's health before handling traffic (e.g., log scans, smoke tests) +7. **Route traffic to the backfilled node that is now using ChainIndexer** +8.
**Ensure equal or better correctness and performance** + - ChainIndexer-using nodes should have full correctness and better performance when compared to [pre-ChainIndexer](#previous-indexing-system) nodes. + +#### Part 2: Create a copyable `chainindex.db` +[Part 3 below](#part-3-update-other-nodes) is going to use the backfilled `chainindex.db` from above with other nodes so they don't have to undergo as long of a backfill process. That said, copying this backfilled `chainindex.db` shouldn't be done while the updated-and-backfilled node is running. Options include: +1. Stop the updated-and-backfilled node before copying it. + * `cp ${LOTUS_PATH}/chainindex/chainindex.db /copy/destination/path/chainindex.db` +2. While the node is running, use the `sqlite3` CLI utility (which should be at least version 3.37) to clone it. + * `sqlite3 ${LOTUS_PATH}/chainindex/chainindex.db '.clone /copy/destination/path/chainindex.db'` +Both of these will result in a file `/copy/destination/path/chainindex.db` that can be copied around in part 3 below. + +#### Part 3: Update other nodes +Now that one has a `${LOTUS_PATH}/chainindex/chainindex.db` from a trusted node, it can be copied to additional nodes to expedite bootstrapping. +1. **Route traffic away from the next node to upgrade** +2. **Stop the Lotus Node** +3. **Update Configuration** + - Modify the Lotus configuration to enable the `ChainIndexer` as described in the [`ChainIndexer Config` section above](#chainindexer-config). +4. **Copy `/copy/destination/path/chainindex.db` from the trusted node in [part 2 above](#part-2-create-a-copyable-chainindexdb)** +5. **Restart Lotus Node** + - Restart your Lotus node with the new configuration. + - The `ChainIndexer` will begin indexing **real-time chain state changes** immediately in the `${LOTUS_PATH}/chainindex` directory. + - *However, it will not automatically index the chain state from where the copied-in `chainindex.db` ends. This will need to be done manually.* +6. **Backfill the small data gap from after the copied-in `chainindex.db`** + - See the [`Backfill` section below](#backfill). + - This should be quick since this gap is presumably on the order of minutes, hours, or days of epochs rather than months. +7. **Ensure node health** + - Perform whatever steps are usually done to validate a node's health before handling traffic (e.g., log scans, smoke tests) +8. **Route traffic to this newly upgraded ChainIndexer-enabled node** +9. **Repeat for other nodes that need to upgrade** + +#### Part 4: Cleanup +It's recommended to keep the [pre-ChainIndexer](#previous-indexing-system) indexing database directory (`${LOTUS_PATH}/sqlite`) around until you've confirmed you don't need to [downgrade](#downgrade-steps). After sustained successful operation following the upgrade, the [pre-ChainIndexer](#previous-indexing-system) database directory can be removed to reclaim disk space. + +### Upgrade when importing chain state from a snapshot +Note: this upgrade path assumes one is starting a fresh node and importing chain state with a snapshot (i.e., `lotus daemon --import-snapshot`). A prime example is an operator adding another node with limited history to their fleet. If not using a snapshot, see the ["upgrade with existing chain state" path](#upgrade-when-using-existing-lotus_path-chain-state). + +1.
**Review the [preparation steps](#preparation)** + - The disk space and upgrade times will be much smaller than the ["upgrade with existing chain state" path](#upgrade-when-using-existing-lotus_path-chain-state) assuming this is a non-archival node that is only indexing a limited number of days of epochs. +2. **Ensure the node is stopped and won't take any traffic initially upon starting** + - Example: prevent a load balancer from routing traffic to the node. +3. **Update Configuration** + - Modify the Lotus configuration to enable the `ChainIndexer` as described in the [`ChainIndexer Config` section above](#chainindexer-config). +4. **Start Lotus with the snapshot import** + - `lotus daemon --import-snapshot` +5. **Wait for the Lotus daemon to sync** + - As the Lotus daemon syncs the chain, the ChainIndexer will automatically index the synced messages, but it will not automatically index ETH RPC events and transactions. +6. **Backfill so ETH RPC events and transactions are indexed as well** + - See the ["Backfill" section below](#backfill); a worked example follows the CLI tool introduction there. + - This will look something like `lotus-shed chainindex validate-backfill --from <start_epoch> --to <end_epoch> --backfill` + - Example: if the current head is epoch 4360000 and one wants to index a day's worth of epochs (2880), then they'd use `--from 4360000 --to 4357120` +7. **Ensure node health** + - Perform whatever steps are usually done to validate a node's health before handling traffic (e.g., log scans, smoke tests) +8. **Route traffic to the backfilled node that is now using ChainIndexer** + +## Backfill +There is no automated migration from [pre-ChainIndexer indices](#previous-indexing-system) to the [ChainIndexer](#chainindexer-indexing-system). Instead, one needs to index historical chain state (i.e., backfill) if RPC access to that historical state is required. (If curious, [read why](#why-isnt-there-an-automated-migration-from-the-previous-indexing-system-to-the-chainindexer-indexing-system).) + +### Backfill Timing + +Backfilling the new `ChainIndexer` was [benchmarked to take approximately 12 hours per month of epochs on a sample archival node doing no other work](https://github.com/filecoin-project/lotus/issues/12453#issuecomment-2405306468). Your results will vary depending on hardware, network, and competing processes. This means if one is upgrading a FEVM archival node, they should plan on the node being out of production service for ~10 days. Additional nodes to update don't need to go through the same time-intensive process though. They can get a `${LOTUS_PATH}/chainindex/chainindex.db` copied from a trusted node per the [upgrade steps](#upgrade). + +### Backfill Disk Space Requirements + +As of October 2024, ChainIndexer will accumulate approximately 340 MiB per day of data, or 10 GiB per month (see [here](https://github.com/filecoin-project/lotus/issues/12453)). + +### `lotus-shed chainindex validate-backfill` CLI tool +The `lotus-shed chainindex validate-backfill` command is a tool for validating and optionally backfilling the chain index over a range of epochs, since calling the [`ChainValidateIndex` API](#chainvalidateindex-rpc-api) one epoch at a time is cumbersome when validating or backfilling many historical epochs. The tool wraps the `ChainValidateIndex` API to process multiple epochs efficiently.
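+For illustration, a minimal sketch that validates and backfills roughly the most recent day of epochs (illustrative values; it assumes `lotus` and `lotus-shed` are on `PATH` and the daemon is running with the ChainIndexer enabled):
+
+```bash
+#!/bin/bash
+
+# Current head, minus 1 to account for deferred execution
+start_epoch=$(($(lotus chain head --height) - 1))
+
+# One day of epochs on mainnet
+epochs_to_backfill=2880
+end_epoch=$((start_epoch - epochs_to_backfill + 1))
+
+lotus-shed chainindex validate-backfill --from="$start_epoch" --to="$end_epoch" --backfill=true
+```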
+ +**Note: This command can only be run when the Lotus daemon is already running with the [`ChainIndexer` enabled](#enablement), as it depends on the `ChainValidateIndex` RPC API.** + +#### Usage + +``` +lotus-shed chainindex validate-backfill --from <start_epoch> --to <end_epoch> [--backfill] [--log-good] +``` + +The command validates the chain index entries for each epoch in the specified range, checking for missing or inconsistent entries (i.e. the indexed data does not match the actual chain state). If `--backfill` is enabled (which it is by default), it will attempt to backfill any missing entries using the `ChainValidateIndex` API. + +You can learn about how to use the tool with `lotus-shed chainindex validate-backfill -h`. + +Note: If you are using a non-standard Lotus repo directory then you can run the command with `lotus-shed -repo /path/to/lotus/repo chainindex validate-backfill ...`, or by setting the `LOTUS_REPO` environment variable. + +## Regular Checks + +During normal operation, it is possible, but not strictly necessary, to run periodic checks on the index to ensure it remains consistent with the chain state. The ChainIndexer is designed to be resilient and consistent, but unconsidered edge cases or bugs could cause the index to become inconsistent. + +The `lotus-shed chainindex validate-backfill` command can be used to validate the index over a range of epochs and can be run periodically via cron, systemd timers, or some other means, to ensure the index remains consistent. An example bash script one could use to validate the index over the last 24 hours every 24 hours is provided below: + + +```bash +#!/bin/bash + +LOGFILE="/var/log/lotus_chainindex_validate.log" +current_date=$(date '+%Y-%m-%d %H:%M:%S') + +# Configurable setting for the backfill option; set to 'false' to simply report errors, as we should +# not expect regular errors in the index. +BACKFILL_OPTION=false + +# Path to the lotus-shed binary +LOTUS_SHED_PATH="/path/to/lotus-shed" + +# Get the current chain head epoch number +start_epoch=$(lotus chain head --height) +# Subtract 1 to account for deferred execution +start_epoch=$((start_epoch - 1)) + +# Define the number of epochs for validation, set to 3000 to validate the last 24 hours plus some buffer +epochs_to_validate=3000 + +# Calculate the end epoch +end_epoch=$((start_epoch - epochs_to_validate + 1)) + +# Run the Lotus chainindex validate-backfill command +validation_output=$("$LOTUS_SHED_PATH" chainindex validate-backfill --from="$start_epoch" --to="$end_epoch" --backfill="$BACKFILL_OPTION" --quiet 2>&1) + +# Check the exit status of the command to determine if errors occurred +if [ $? -ne 0 ]; then + # Log the error with a timestamp + { + echo "[$current_date] Validation error:" + echo "$validation_output" + } >> "$LOGFILE" +else + echo "[$current_date] Validation completed successfully." >> "$LOGFILE" +fi +``` + +Note that this script simply logs any errors that occur during the validation process. It is up to the operator to determine the appropriate response to any errors that occur, including reporting potential bugs to Lotus maintainers. A further enhancement could be to send an alert to an operator if an error occurs. + +## Downgrade Steps + +In case you need to downgrade to the [previous indexing system](#previous-indexing-system), follow these steps: + +1. Prevent the node from receiving traffic. +2. Stop your Lotus node. +3.
Download or build a Lotus binary for the rollback version, which has the implementation of the old `EthTxHashLookup`, `MsgIndex`, and `EventIndex` indices. +4. Ensure that you've set the correct config for the existing `EthTxHashLookup`, `MsgIndex`, and `EventIndex` indices in the `config.toml` file. +5. Restart your Lotus node. +6. Backfill the `EthTxHashLookup`, `MsgIndex`, and `EventIndex` indices using the `lotus-shed index backfill-*` CLI tooling available in the [previous indexing system](#previous-indexing-system) for the range of epochs between the upgrade to `ChainIndexer` and the rollback of `ChainIndexer`. +7. Route traffic back to the node. + +## Terminology +### Previous Indexing System +* This corresponds to the indexing system used in Lotus versions before v1.31.0. +* It has been replaced by the [ChainIndexer](#chainindexer-indexing-system). +* It was composed of three indexers using three separate databases: [`EthTxHashLookup`](https://github.com/filecoin-project/lotus/blob/v1.31.0/chain/ethhashlookup/eth_transaction_hash_lookup.go), [`MsgIndex`](https://github.com/filecoin-project/lotus/blob/v1.31.0/chain/index/msgindex.go), and [`EventIndex`](https://github.com/filecoin-project/lotus/blob/v1.31.0/chain/events/filter/index.go). +* It persisted state to the [removed option](#removed-options) for `Events.DatabasePath`, which defaulted to `${LOTUS_PATH}/sqlite`. +* It had CLI backfill tooling: `lotus-shed index backfill-*` + +### ChainIndexer Indexing System +* This corresponds to the indexing system used in Lotus versions v1.31.0 onwards. +* It replaced the [previous indexing system](#previous-indexing-system). +* It is composed of a single indexer, [`ChainIndexer`](https://github.com/filecoin-project/lotus/blob/master/chain/index/indexer.go), using a [single database for transactions, messages, and events](https://github.com/filecoin-project/lotus/blob/master/chain/index/ddls.go). +* It persists state to `${LOTUS_PATH}/chainindex`. +* It has this CLI backfill tooling: [`lotus-shed chainindex validate-backfill`](#lotus-shed-chainindex-validate-backfill-cli-tool) +* **Storage requirements:** See the [backfill disk space requirements](#backfill-disk-space-requirements). +* **Backfill times:** See the [backfill timing](#backfill-timing). + +## Appendix + +### Why isn't there an automated migration from the [previous indexing system](#previous-indexing-system) to the [ChainIndexer indexing system](#chainindexer-indexing-system)? + +The decision to not invest here ultimately comes down to the development-time cost vs. benefit ratio. + +For archival nodes, we don't have the confidence that the [previous indexing system](#previous-indexing-system) has the correct data to bootstrap from. In 2024, Lotus maintainers have fixed multiple bugs in the [previous indexing system](#previous-indexing-system), but they still see reports of missing data, mismatched event index counts, etc. Investing here in a migration isn't guaranteed to yield a correct index. As a result, one would still need to perform the [backfill steps](#backfill) to validate and correct the data anyway. While starting from partially correct data should be faster than starting from none, it would still require an archival node to take an outage on the order of days, which isn't good enough. + +The schemas of [the old fragmented indices](#previous-indexing-system) don't naturally map to the schema of the [ChainIndexer](#chainindexer-indexing-system). There would be additional data wrangling work to ultimately get this right.
+ +[Backfilling](#backfill) is a one-time cost. If an operator is running multiple nodes, they only need to do it on one node and can then simply copy over the index to the other nodes per [the upgrade steps](#upgrade). The new `chainindex.db` copy can also be shared among operators if there is a trust relationship. + +Note that this lack of an automated migration is primarily a concern for the relatively small number of archival nodes. It isn't as much of a concern for snapshot-synced nodes. Snapshot-synced nodes that hold only a portion of the chain state, because they serve queries going back only a few days, can expect the backfill to take closer to an hour per [backfill timing](#backfill-timing). + +### `ChainValidateIndex` RPC API + +Please refer to the [Lotus API documentation](https://github.com/filecoin-project/lotus/blob/master/documentation/en/api-v1-unstable-methods.md) for detailed documentation of the `ChainValidateIndex` JSON RPC API. + +The `ChainValidateIndex` JSON RPC API serves a dual purpose: it validates/diagnoses the integrity of the index at a specific epoch (i.e., it ensures consistency between indexed data and actual chain state), while also providing the option to backfill the `ChainIndexer` if it does not have data for the specified epoch. + +The `ChainValidateIndex` RPC API is available for use once the Lotus daemon has started with `ChainIndexer` [enabled](#enablement). + +Here are some examples of how to use the `ChainValidateIndex` JSON RPC API for validating/backfilling the index: + +1) Validating the index for an epoch that is a NULL round: + +```bash +curl -X POST -H "Content-Type: application/json" --data '{ + "jsonrpc": "2.0", + "method": "Filecoin.ChainValidateIndex", + "params": [1954383, false], + "id": 1 +}' http://localhost:1234/rpc/v1 | jq . +``` +```json +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "TipSetKey": [], + "Height": 1954383, + "IndexedMessagesCount": 0, + "IndexedEventsCount": 0, + "Backfilled": false, + "IsNullRound": true + } +} +``` + +2) Validating the index for an epoch for which the indexer has missing data, with backfilling disabled: + +```bash +curl -X POST -H "Content-Type: application/json" --data '{ + "jsonrpc": "2.0", + "method": "Filecoin.ChainValidateIndex", + "params": [1995103, false], + "id": 1 +}' http://localhost:1234/rpc/v1 | jq . +``` +```json +{ + "error": { + "code": 1, + "message": "missing tipset at height 1995103 in the chain index, set backfill flag to true to fix" + }, + "id": 1, + "jsonrpc": "2.0" +} +``` + +3) Validating the index for an epoch for which the indexer has missing data, with backfilling enabled: + +```bash +curl -X POST -H "Content-Type: application/json" --data '{ + "jsonrpc": "2.0", + "method": "Filecoin.ChainValidateIndex", + "params": [1995103, true], + "id": 1 +}' http://localhost:1234/rpc/v1 | jq .
+``` +```json +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "TipSetKey": [ + { + "/": "bafy2bzacebvzbpbdwxsclwyorlzclv6cbsvcbtq34sajow2sn7mnksy3wehew" + }, + { + "/": "bafy2bzacedgei4ve3spkfp3oou5oajwd5cogn7lljsuvoj644fgj3gv7luamu" + }, + { + "/": "bafy2bzacebbpcnjoi46obpaheylyxfy5y2lrtdsyglqw3hx2qg64quip5u76s" + } + ], + "Height": 1995103, + "IndexedMessagesCount": 0, + "IndexedEventsCount": 0, + "Backfilled": true, + "IsNullRound": false + } +} +``` diff --git a/documentation/en/default-lotus-config.toml b/documentation/en/default-lotus-config.toml index 7f39f23a5b8..8d3eda9d496 100644 --- a/documentation/en/default-lotus-config.toml +++ b/documentation/en/default-lotus-config.toml @@ -221,20 +221,13 @@ [Fevm] - # EnableEthRPC enables eth_ rpc, and enables storing a mapping of eth transaction hashes to filecoin message Cids. - # This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above. + # EnableEthRPC enables eth_ RPC methods. + # Note: Setting this to true will also require that ChainIndexer is enabled, otherwise it will cause an error at startup. # # type: bool # env var: LOTUS_FEVM_ENABLEETHRPC #EnableEthRPC = false - # EthTxHashMappingLifetimeDays the transaction hash lookup database will delete mappings that have been stored for more than x days - # Set to 0 to keep all mappings - # - # type: int - # env var: LOTUS_FEVM_ETHTXHASHMAPPINGLIFETIMEDAYS - #EthTxHashMappingLifetimeDays = 0 - # EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter # # type: uint64 @@ -253,25 +246,9 @@ [Events] - # DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. - # The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. - # - # type: bool - # env var: LOTUS_EVENTS_DISABLEREALTIMEFILTERAPI - #DisableRealTimeFilterAPI = false - - # DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events - # that occurred in the past. HistoricFilterAPI maintains a queryable index of events. - # The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. - # - # type: bool - # env var: LOTUS_EVENTS_DISABLEHISTORICFILTERAPI - #DisableHistoricFilterAPI = false - # EnableActorEventsAPI enables the Actor events API that enables clients to consume events # emitted by (smart contracts + built-in Actors). - # This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be - # disabled by setting their respective Disable* options. + # Note: Setting this to true will also require that ChainIndexer is enabled, otherwise it will cause an error at startup. # # type: bool # env var: LOTUS_EVENTS_ENABLEACTOREVENTSAPI @@ -307,23 +284,64 @@ # env var: LOTUS_EVENTS_MAXFILTERHEIGHTRANGE #MaxFilterHeightRange = 2880 - # DatabasePath is the full path to a sqlite database that will be used to index actor events to - # support the historic filter APIs. If the database does not exist it will be created. The directory containing - # the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as - # relative to the CWD (current working directory). + +[ChainIndexer] + # EnableIndexer controls whether the chain indexer is active. 
+ # The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. + # It is a crucial component for optimizing Lotus RPC response times. + # + # Default: false (indexer is disabled) + # + # Setting this to true will enable the indexer, which will significantly improve RPC performance. + # It is strongly recommended to keep this set to true if you are an RPC provider. # - # type: string - # env var: LOTUS_EVENTS_DATABASEPATH - #DatabasePath = "" + # type: bool + # env var: LOTUS_CHAININDEXER_ENABLEINDEXER + #EnableIndexer = false + # GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. + # The garbage collection (GC) process removes data older than this retention period. + # Setting this to 0 disables GC, preserving all historical data indefinitely. + # + # If set, the minimum value must be greater than builtin.EpochsInDay (i.e. "2880" epochs for mainnet). + # This ensures a reasonable retention period for the indexed data. + # + # Default: 0 (GC disabled) + # + # type: int64 + # env var: LOTUS_CHAININDEXER_GCRETENTIONEPOCHS + #GCRetentionEpochs = 0 -[Index] - # EXPERIMENTAL FEATURE. USE WITH CAUTION - # EnableMsgIndex enables indexing of messages on chain. + # ReconcileEmptyIndex determines whether to reconcile the index with the chain state + # during startup when the index is empty. + # + # When set to true: + # - On startup, if the index is empty, the indexer will index the available + # chain state on the node albeit within the MaxReconcileTipsets limit. + # + # When set to false: + # - The indexer will not automatically re-index the chain state on startup if the index is empty. + # + # Default: false + # + # Note: The number of tipsets reconciled (i.e. indexed) during this process can be + # controlled using the MaxReconcileTipsets option. # # type: bool - # env var: LOTUS_INDEX_ENABLEMSGINDEX - #EnableMsgIndex = false + # env var: LOTUS_CHAININDEXER_RECONCILEEMPTYINDEX + #ReconcileEmptyIndex = false + + # MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. + # It represents the maximum number of tipsets to index from the chain state that are absent in the index. + # + # Default: 3 * epochsPerDay (approximately 3 days of chain history) + # + # Note: Setting this value too low may result in incomplete indexing, while setting it too high + # may increase startup time. 
+ # + # type: uint64 + # env var: LOTUS_CHAININDEXER_MAXRECONCILETIPSETS + #MaxReconcileTipsets = 8640 [FaultReporter] diff --git a/itests/eth_config_test.go b/itests/eth_config_test.go index 8b74d011aa2..ecd0379e2b2 100644 --- a/itests/eth_config_test.go +++ b/itests/eth_config_test.go @@ -17,7 +17,7 @@ func TestEthFilterAPIDisabledViaConfig(t *testing.T) { kit.QuietMiningLogs() - // pass kit.DisableEthRPC() so RealTimeFilterAPI will not be enabled + // pass kit.DisableEthRPC() to disable ETH RPC client, _, _ := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ThroughRPC(), kit.DisableEthRPC()) _, err := client.EthNewPendingTransactionFilter(ctx) diff --git a/itests/eth_filter_test.go b/itests/eth_filter_test.go index 16991069c9c..875901aa00b 100644 --- a/itests/eth_filter_test.go +++ b/itests/eth_filter_test.go @@ -524,6 +524,56 @@ func TestEthGetLogsBasic(t *testing.T) { } AssertEthLogs(t, rctLogs, expected, received) + + head, err := client.ChainHead(ctx) + require.NoError(err) + + for height := 0; height < int(head.Height()); height++ { + // for each tipset + ts, err := client.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(height), types.EmptyTSK) + require.NoError(err) + + if ts.Height() != abi.ChainEpoch(height) { + iv, err := client.ChainValidateIndex(ctx, abi.ChainEpoch(height), false) + require.NoError(err) + require.True(iv.IsNullRound) + t.Logf("tipset %d is a null round", height) + continue + } + + expectedValidation := types.IndexValidation{ + TipSetKey: ts.Key(), + Height: ts.Height(), + IndexedMessagesCount: 0, + IndexedEventsCount: 0, + IndexedEventEntriesCount: 0, + Backfilled: false, + IsNullRound: false, + } + messages, err := client.ChainGetMessagesInTipset(ctx, ts.Key()) + require.NoError(err) + expectedValidation.IndexedMessagesCount = uint64(len(messages)) + for _, m := range messages { + receipt, err := client.StateSearchMsg(ctx, types.EmptyTSK, m.Cid, -1, false) + require.NoError(err) + require.NotNil(receipt) + // receipt + if receipt.Receipt.EventsRoot != nil { + events, err := client.ChainGetEvents(ctx, *receipt.Receipt.EventsRoot) + require.NoError(err) + expectedValidation.IndexedEventsCount += uint64(len(events)) + for _, event := range events { + expectedValidation.IndexedEventEntriesCount += uint64(len(event.Entries)) + } + } + } + + t.Logf("tipset %d: %+v", height, expectedValidation) + + iv, err := client.ChainValidateIndex(ctx, abi.ChainEpoch(height), false) + require.NoError(err) + require.Equal(iv, &expectedValidation) + } } func TestEthSubscribeLogsNoTopicSpec(t *testing.T) { diff --git a/itests/eth_transactions_test.go b/itests/eth_transactions_test.go index a39251b0fb6..3902345c857 100644 --- a/itests/eth_transactions_test.go +++ b/itests/eth_transactions_test.go @@ -745,12 +745,6 @@ func TestTraceTransaction(t *testing.T) { require.Contains(t, err.Error(), "transaction not found") require.Nil(t, traces) - // EthTraceTransaction errors when a trace for pending transactions is requested - traces, err = client.EthTraceTransaction(ctx, hash.String()) - require.Error(t, err) - require.Contains(t, err.Error(), "no trace for pending transactions") - require.Nil(t, traces) - receipt, err := client.EVM().WaitTransaction(ctx, hash) require.NoError(t, err) require.NotNil(t, receipt) diff --git a/itests/kit/node_opts.go b/itests/kit/node_opts.go index ad1f7e3edb4..1bfd4550977 100644 --- a/itests/kit/node_opts.go +++ b/itests/kit/node_opts.go @@ -90,6 +90,7 @@ var DefaultNodeOpts = nodeOpts{ // test defaults cfg.Fevm.EnableEthRPC = true + 
cfg.ChainIndexer.EnableIndexer = true cfg.Events.MaxFilterHeightRange = math.MaxInt64 cfg.Events.EnableActorEventsAPI = true @@ -103,6 +104,8 @@ var DefaultNodeOpts = nodeOpts{ cfg.Libp2p.ConnMgrLow = 1024 cfg.Libp2p.ConnMgrHigh = 2048 cfg.Libp2p.ConnMgrGrace = config.Duration(time.Hour) + cfg.ChainIndexer.ReconcileEmptyIndex = true + cfg.ChainIndexer.MaxReconcileTipsets = 10000 return nil }, }, diff --git a/itests/msgindex_test.go b/itests/msgindex_test.go deleted file mode 100644 index d9ed752797e..00000000000 --- a/itests/msgindex_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package itests - -import ( - "context" - "os" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-state-types/big" - "github.com/filecoin-project/go-state-types/exitcode" - - lapi "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/index" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/itests/kit" - "github.com/filecoin-project/lotus/node" -) - -func init() { - // adjust those to make tests snappy - index.CoalesceMinDelay = time.Millisecond - index.CoalesceMaxDelay = 10 * time.Millisecond - index.CoalesceMergeInterval = time.Millisecond -} - -func testMsgIndex( - t *testing.T, - name string, - run func(t *testing.T, makeMsgIndex func(cs *store.ChainStore) (index.MsgIndex, error)), - check func(t *testing.T, i int, msgIndex index.MsgIndex), -) { - - // create the message indices in the test context - var mx sync.Mutex - var tmpDirs []string - var msgIndices []index.MsgIndex - - t.Cleanup(func() { - for _, msgIndex := range msgIndices { - _ = msgIndex.Close() - } - - for _, tmp := range tmpDirs { - _ = os.RemoveAll(tmp) - } - }) - - makeMsgIndex := func(cs *store.ChainStore) (index.MsgIndex, error) { - var err error - tmp := t.TempDir() - msgIndex, err := index.NewMsgIndex(context.Background(), tmp+"/msgindex.db", cs) - if err == nil { - mx.Lock() - tmpDirs = append(tmpDirs, tmp) - msgIndices = append(msgIndices, msgIndex) - mx.Unlock() - } - return msgIndex, err - } - - t.Run(name, func(t *testing.T) { - run(t, makeMsgIndex) - }) - - if len(msgIndices) == 0 { - t.Fatal("no message indices") - } - - for i, msgIndex := range msgIndices { - check(t, i, msgIndex) - } -} - -func checkNonEmptyMsgIndex(t *testing.T, _ int, msgIndex index.MsgIndex) { - mi, ok := msgIndex.(interface{ CountMessages() (int64, error) }) - if !ok { - t.Fatal("index does not allow counting") - } - count, err := mi.CountMessages() - require.NoError(t, err) - require.NotEqual(t, count, 0) -} - -func TestMsgIndex(t *testing.T) { - testMsgIndex(t, "testSearchMsg", testSearchMsgWithIndex, checkNonEmptyMsgIndex) -} - -func testSearchMsgWithIndex(t *testing.T, makeMsgIndex func(cs *store.ChainStore) (index.MsgIndex, error)) { - // copy of apiSuite.testSearchMsgWith; needs to be copied or else CI is angry, tests are built individually there - ctx := context.Background() - - full, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts(node.Override(new(index.MsgIndex), makeMsgIndex))) - - senderAddr, err := full.WalletDefaultAddress(ctx) - require.NoError(t, err) - - msg := &types.Message{ - From: senderAddr, - To: senderAddr, - Value: big.Zero(), - } - - ens.BeginMining(100 * time.Millisecond) - - sm, err := full.MpoolPushMessage(ctx, msg, nil) - require.NoError(t, err) - - //stm: @CHAIN_STATE_WAIT_MSG_001 - res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, 
lapi.LookbackNoLimit, true) - require.NoError(t, err) - - require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful") - - //stm: @CHAIN_STATE_SEARCH_MSG_001 - searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true) - require.NoError(t, err) - require.NotNil(t, searchRes) - - require.Equalf(t, res.TipSet, searchRes.TipSet, "search ts: %s, different from wait ts: %s", searchRes.TipSet, res.TipSet) -} diff --git a/lib/sqlite/sqlite.go b/lib/sqlite/sqlite.go index cb489284c9a..96d3b5fa9ba 100644 --- a/lib/sqlite/sqlite.go +++ b/lib/sqlite/sqlite.go @@ -11,6 +11,7 @@ import ( "time" logging "github.com/ipfs/go-log/v2" + _ "github.com/mattn/go-sqlite3" "golang.org/x/xerrors" ) @@ -22,12 +23,11 @@ var pragmas = []string{ "PRAGMA synchronous = normal", "PRAGMA temp_store = memory", "PRAGMA mmap_size = 30000000000", - "PRAGMA page_size = 32768", "PRAGMA auto_vacuum = NONE", "PRAGMA automatic_index = OFF", "PRAGMA journal_mode = WAL", - "PRAGMA wal_autocheckpoint = 256", // checkpoint @ 256 pages - "PRAGMA journal_size_limit = 0", // always reset journal and wal files + "PRAGMA journal_size_limit = 0", // always reset journal and wal files + "PRAGMA foreign_keys = ON", } const metaTableDdl = `CREATE TABLE IF NOT EXISTS _meta ( @@ -45,30 +45,37 @@ func metaDdl(version uint64) []string { } // Open opens a database at the given path. If the database does not exist, it will be created. -func Open(path string) (*sql.DB, bool, error) { +func Open(path string) (*sql.DB, error) { if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return nil, false, xerrors.Errorf("error creating database base directory [@ %s]: %w", path, err) + return nil, xerrors.Errorf("error creating database base directory [@ %s]: %w", path, err) } _, err := os.Stat(path) if err != nil && !errors.Is(err, fs.ErrNotExist) { - return nil, false, xerrors.Errorf("error checking file status for database [@ %s]: %w", path, err) + return nil, xerrors.Errorf("error checking file status for database [@ %s]: %w", path, err) } - exists := err == nil db, err := sql.Open("sqlite3", path+"?mode=rwc") if err != nil { - return nil, false, xerrors.Errorf("error opening database [@ %s]: %w", path, err) + return nil, xerrors.Errorf("error opening database [@ %s]: %w", path, err) } for _, pragma := range pragmas { if _, err := db.Exec(pragma); err != nil { _ = db.Close() - return nil, false, xerrors.Errorf("error setting database pragma %q: %w", pragma, err) + return nil, xerrors.Errorf("error setting database pragma %q: %w", pragma, err) } } - return db, exists, nil + var foreignKeysEnabled int + if err := db.QueryRow("PRAGMA foreign_keys;").Scan(&foreignKeysEnabled); err != nil { + return nil, xerrors.Errorf("failed to check foreign keys setting: %w", err) + } + if foreignKeysEnabled == 0 { + return nil, xerrors.Errorf("foreign keys are not enabled for database [@ %s]", path) + } + + return db, nil } // InitDb initializes the database by checking whether it needs to be created or upgraded. 
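The reworked sqlite.Open above no longer reports whether the database file already existed; it now returns just (db, error), applies the package pragmas, and fails fast if foreign key enforcement could not be turned on. For illustration, a minimal sketch of a caller adapting to the new two-value signature; the schema, database name, and path here are hypothetical, while Open and InitDb are invoked as in the test below:

package main

import (
	"context"
	"log"
	"path/filepath"

	"github.com/filecoin-project/lotus/lib/sqlite"
)

func main() {
	dbPath := filepath.Join("/var/lib/example", "index.db")

	// Open now returns (db, error); the old `exists` boolean is gone, so any
	// "back-fill on first creation" decision must be made by other means.
	db, err := sqlite.Open(dbPath)
	if err != nil {
		// this also fires if the foreign_keys pragma could not be enabled
		log.Fatalf("opening database: %s", err)
	}
	defer func() { _ = db.Close() }()

	// Hypothetical DDL; InitDb creates the schema on first run and upgrades
	// it on subsequent runs (no migrations are passed here).
	ddl := []string{
		`CREATE TABLE IF NOT EXISTS example (
			id INTEGER PRIMARY KEY,
			value TEXT NOT NULL
		)`,
	}
	if err := sqlite.InitDb(context.Background(), "exampledb", db, ddl, nil); err != nil {
		log.Fatalf("initializing database: %s", err)
	}
}

Dropping the exists flag simplifies call sites; the removed back-fill-on-first-creation path in the old EthTxHashManager wiring, shown later in this diff, was its last consumer.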
diff --git a/lib/sqlite/sqlite_test.go b/lib/sqlite/sqlite_test.go index bda6432f5e6..f492b092a5e 100644 --- a/lib/sqlite/sqlite_test.go +++ b/lib/sqlite/sqlite_test.go @@ -32,9 +32,8 @@ func TestSqlite(t *testing.T) { tmpDir := t.TempDir() dbPath := filepath.Join(tmpDir, "/test.db") - db, exists, err := sqlite.Open(dbPath) + db, err := sqlite.Open(dbPath) req.NoError(err) - req.False(exists) req.NotNil(db) err = sqlite.InitDb(context.Background(), "testdb", db, ddl, nil) @@ -95,9 +94,8 @@ func TestSqlite(t *testing.T) { // open again, check contents is the same - db, exists, err = sqlite.Open(dbPath) + db, err = sqlite.Open(dbPath) req.NoError(err) - req.True(exists) req.NotNil(db) err = sqlite.InitDb(context.Background(), "testdb", db, ddl, nil) @@ -113,9 +111,9 @@ func TestSqlite(t *testing.T) { // open again, with a migration - db, exists, err = sqlite.Open(dbPath) + db, err = sqlite.Open(dbPath) req.NoError(err) - req.True(exists) + req.NotNil(db) req.NotNil(db) migration1 := func(ctx context.Context, tx *sql.Tx) error { @@ -156,9 +154,8 @@ func TestSqlite(t *testing.T) { // open again, with another migration - db, exists, err = sqlite.Open(dbPath) + db, err = sqlite.Open(dbPath) req.NoError(err) - req.True(exists) req.NotNil(db) migration2 := func(ctx context.Context, tx *sql.Tx) error { diff --git a/node/builder.go b/node/builder.go index 94fe170cc21..7d03e9593a4 100644 --- a/node/builder.go +++ b/node/builder.go @@ -25,7 +25,6 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/journal/alerting" @@ -129,6 +128,8 @@ const ( StoreEventsKey + InitChainIndexerKey + _nInvokes // keep this last ) @@ -368,7 +369,6 @@ func Test() Option { Unset(RunPeerMgrKey), Unset(new(*peermgr.PeerMgr)), Override(new(beacon.Schedule), testing.RandomBeacon), - Override(new(index.MsgIndex), modules.DummyMsgIndex), ) } diff --git a/node/builder_chain.go b/node/builder_chain.go index d367ab4e3eb..340a5e8e035 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -141,9 +141,12 @@ var ChainNode = Options( Override(new(full.StateModuleAPI), From(new(api.Gateway))), Override(new(stmgr.StateManagerAPI), rpcstmgr.NewRPCStateManager), Override(new(full.EthModuleAPI), From(new(api.Gateway))), - Override(new(full.EthTxHashManager), &full.EthTxHashManagerDummy{}), Override(new(full.EthEventAPI), From(new(api.Gateway))), Override(new(full.ActorEventAPI), From(new(api.Gateway))), + + Override(new(index.Indexer), modules.ChainIndexer(config.ChainIndexerConfig{ + EnableIndexer: false, + })), ), // Full node API / service startup @@ -176,6 +179,13 @@ func ConfigFullNode(c interface{}) Option { return Error(xerrors.Errorf("invalid config from repo, got: %T", c)) } + if cfg.Fevm.EnableEthRPC && !cfg.ChainIndexer.EnableIndexer { + return Error(xerrors.New("chain indexer must be enabled if ETH RPC is enabled")) + } + if cfg.Events.EnableActorEventsAPI && !cfg.ChainIndexer.EnableIndexer { + return Error(xerrors.New("chain indexer must be enabled if actor events API is enabled")) + } + return Options( ConfigCommon(&cfg.Common, build.NodeUserVersion()), @@ -240,7 +250,7 @@ func ConfigFullNode(c interface{}) Option { // If the Eth JSON-RPC is enabled, enable storing events at the ChainStore. 
// This is the case even if real-time and historic filtering are disabled, // as it enables us to serve logs in eth_getTransactionReceipt. - If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI, Override(StoreEventsKey, modules.EnableStoringEvents)), + If(cfg.Fevm.EnableEthRPC || cfg.Events.EnableActorEventsAPI || cfg.ChainIndexer.EnableIndexer, Override(StoreEventsKey, modules.EnableStoringEvents)), If(cfg.Wallet.RemoteBackend != "", Override(new(*remotewallet.RemoteWallet), remotewallet.SetupRemoteWallet(cfg.Wallet.RemoteBackend)), @@ -263,14 +273,12 @@ func ConfigFullNode(c interface{}) Option { If(cfg.Fevm.EnableEthRPC, Override(new(*full.EthEventHandler), modules.EthEventHandler(cfg.Events, cfg.Fevm.EnableEthRPC)), - Override(new(full.EthTxHashManager), modules.EthTxHashManager(cfg.Fevm)), Override(new(full.EthModuleAPI), modules.EthModuleAPI(cfg.Fevm)), Override(new(full.EthEventAPI), From(new(*full.EthEventHandler))), ), If(!cfg.Fevm.EnableEthRPC, Override(new(full.EthModuleAPI), &full.EthModuleDummy{}), Override(new(full.EthEventAPI), &full.EthModuleDummy{}), - Override(new(full.EthTxHashManager), &full.EthTxHashManagerDummy{}), ), If(cfg.Events.EnableActorEventsAPI, @@ -281,14 +289,18 @@ func ConfigFullNode(c interface{}) Option { ), ), - // enable message index for full node when configured by the user, otherwise use dummy. - If(cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.MsgIndex)), - If(!cfg.Index.EnableMsgIndex, Override(new(index.MsgIndex), modules.DummyMsgIndex)), - // enable fault reporter when configured by the user If(cfg.FaultReporter.EnableConsensusFaultReporter, Override(ConsensusReporterKey, modules.RunConsensusFaultReporter(cfg.FaultReporter)), ), + + ApplyIf(isFullNode, + Override(new(index.Indexer), modules.ChainIndexer(cfg.ChainIndexer)), + Override(new(full.ChainIndexerAPI), modules.ChainIndexHandler(cfg.ChainIndexer)), + If(cfg.ChainIndexer.EnableIndexer, + Override(InitChainIndexerKey, modules.InitChainIndexer), + ), + ), ) } diff --git a/node/config/def.go b/node/config/def.go index cc390371302..e6bdc04bdb8 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -83,19 +83,22 @@ func DefaultFullNode() *FullNode { }, }, Fevm: FevmConfig{ - EnableEthRPC: false, - EthTxHashMappingLifetimeDays: 0, - EthTraceFilterMaxResults: 500, - EthBlkCacheSize: 500, + EnableEthRPC: false, + EthTraceFilterMaxResults: 500, + EthBlkCacheSize: 500, }, Events: EventsConfig{ - DisableRealTimeFilterAPI: false, - DisableHistoricFilterAPI: false, - EnableActorEventsAPI: false, - FilterTTL: Duration(time.Hour * 1), - MaxFilters: 100, - MaxFilterResults: 10000, - MaxFilterHeightRange: 2880, // conservative limit of one day + EnableActorEventsAPI: false, + FilterTTL: Duration(time.Hour * 1), + MaxFilters: 100, + MaxFilterResults: 10000, + MaxFilterHeightRange: 2880, // conservative limit of one day + }, + ChainIndexer: ChainIndexerConfig{ + EnableIndexer: false, + GCRetentionEpochs: 0, + ReconcileEmptyIndex: false, + MaxReconcileTipsets: 3 * builtin.EpochsInDay, }, } } diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 6420c0f5f14..d448f5b9ab3 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -71,6 +71,65 @@ your node if metadata log is disabled`, Comment: ``, }, }, + "ChainIndexerConfig": { + { + Name: "EnableIndexer", + Type: "bool", + + Comment: `EnableIndexer controls whether the chain indexer is active. +The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. 
+It is a crucial component for optimizing Lotus RPC response times. + +Default: false (indexer is disabled) + +Setting this to true will enable the indexer, which will significantly improve RPC performance. +It is strongly recommended to keep this set to true if you are an RPC provider.`, + }, + { + Name: "GCRetentionEpochs", + Type: "int64", + + Comment: `GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. +The garbage collection (GC) process removes data older than this retention period. +Setting this to 0 disables GC, preserving all historical data indefinitely. + +If set, the minimum value must be greater than builtin.EpochsInDay (i.e. "2880" epochs for mainnet). +This ensures a reasonable retention period for the indexed data. + +Default: 0 (GC disabled)`, + }, + { + Name: "ReconcileEmptyIndex", + Type: "bool", + + Comment: `ReconcileEmptyIndex determines whether to reconcile the index with the chain state +during startup when the index is empty. + +When set to true: +- On startup, if the index is empty, the indexer will index the available +chain state on the node albeit within the MaxReconcileTipsets limit. + +When set to false: +- The indexer will not automatically re-index the chain state on startup if the index is empty. + +Default: false + +Note: The number of tipsets reconciled (i.e. indexed) during this process can be +controlled using the MaxReconcileTipsets option.`, + }, + { + Name: "MaxReconcileTipsets", + Type: "uint64", + + Comment: `MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. +It represents the maximum number of tipsets to index from the chain state that are absent in the index. + +Default: 3 * epochsPerDay (approximately 3 days of chain history) + +Note: Setting this value too low may result in incomplete indexing, while setting it too high +may increase startup time.`, + }, + }, "Chainstore": { { Name: "EnableSplitstore", @@ -114,29 +173,13 @@ your node if metadata log is disabled`, }, }, "EventsConfig": { - { - Name: "DisableRealTimeFilterAPI", - Type: "bool", - - Comment: `DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. -The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, - }, - { - Name: "DisableHistoricFilterAPI", - Type: "bool", - - Comment: `DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events -that occurred in the past. HistoricFilterAPI maintains a queryable index of events. -The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag.`, - }, { Name: "EnableActorEventsAPI", Type: "bool", Comment: `EnableActorEventsAPI enables the Actor events API that enables clients to consume events emitted by (smart contracts + built-in Actors). 
-This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be -disabled by setting their respective Disable* options.`, +Note: Setting this to true will also require that the ChainIndexer is enabled; otherwise, Lotus will report an error at startup.`, }, { Name: "FilterTTL", @@ -168,15 +211,6 @@ of filters per connection.`, Comment: `MaxFilterHeightRange specifies the maximum range of heights that can be used in a filter (to avoid querying the entire chain)`, }, - { - Name: "DatabasePath", - Type: "string", - - Comment: `DatabasePath is the full path to a sqlite database that will be used to index actor events to -support the historic filter APIs. If the database does not exist it will be created. The directory containing -the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as -relative to the CWD (current working directory).`, - }, }, "FaultReporterConfig": { { @@ -220,15 +254,8 @@ rewards. This address should have adequate funds to cover gas fees.`, Name: "EnableEthRPC", Type: "bool", - Comment: `EnableEthRPC enables eth_ rpc, and enables storing a mapping of eth transaction hashes to filecoin message Cids. -This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above.`, - }, - { - Name: "EthTxHashMappingLifetimeDays", - Type: "int", - - Comment: `EthTxHashMappingLifetimeDays the transaction hash lookup database will delete mappings that have been stored for more than x days -Set to 0 to keep all mappings`, + Comment: `EnableEthRPC enables eth_ RPC methods. +Note: Setting this to true will also require that the ChainIndexer is enabled; otherwise, Lotus will report an error at startup.`, }, { Name: "EthTraceFilterMaxResults", @@ -236,12 +263,6 @@ Set to 0 to keep all mappings`, Comment: `EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter`, }, - { - Name: "Events", - Type: "DeprecatedEvents", - - Comment: ``, - }, { Name: "EthBlkCacheSize", Type: "int", @@ -297,8 +318,8 @@ Note: Setting this value to 0 disables the cache.`, Comment: ``, }, { - Name: "Index", - Type: "IndexConfig", + Name: "ChainIndexer", + Type: "ChainIndexerConfig", Comment: ``, }, @@ -342,15 +363,6 @@ in a cluster. Only 1 is required`, Comment: `The port to find Yugabyte. Blank for default.`, }, }, - "IndexConfig": { - { - Name: "EnableMsgIndex", - Type: "bool", - - Comment: `EXPERIMENTAL FEATURE. USE WITH CAUTION -EnableMsgIndex enables indexing of messages on chain.`, - }, - }, "JournalConfig": { { Name: "DisabledEvents", diff --git a/node/config/types.go b/node/config/types.go index d7753d4e19e..46be4089d63 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -25,7 +25,7 @@ type FullNode struct { Chainstore Chainstore Fevm FevmConfig Events EventsConfig - Index IndexConfig + ChainIndexer ChainIndexerConfig FaultReporter FaultReporterConfig } @@ -538,19 +538,13 @@ type FeeConfig struct { } type FevmConfig struct { - // EnableEthRPC enables eth_ rpc, and enables storing a mapping of eth transaction hashes to filecoin message Cids. - // This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be disabled by config options above. + // EnableEthRPC enables eth_ RPC methods. + // Note: Setting this to true will also require that the ChainIndexer is enabled; otherwise, Lotus will report an error at startup.
EnableEthRPC bool - // EthTxHashMappingLifetimeDays the transaction hash lookup database will delete mappings that have been stored for more than x days - // Set to 0 to keep all mappings - EthTxHashMappingLifetimeDays int - // EthTraceFilterMaxResults sets the maximum results returned per request by trace_filter EthTraceFilterMaxResults uint64 - Events DeprecatedEvents `toml:"Events,omitempty"` - // EthBlkCacheSize specifies the size of the cache used for caching Ethereum blocks. // This cache enhances the performance of the eth_getBlockByHash RPC call by minimizing the need to access chain state for // recently requested blocks that are already cached. @@ -559,43 +553,10 @@ type FevmConfig struct { EthBlkCacheSize int } -type DeprecatedEvents struct { - // DisableRealTimeFilterAPI is DEPRECATED and will be removed in a future release. Use Events.DisableRealTimeFilterAPI instead. - DisableRealTimeFilterAPI bool `moved:"Events.DisableRealTimeFilterAPI" toml:"DisableRealTimeFilterAPI,omitempty"` - - // DisableHistoricFilterAPI is DEPRECATED and will be removed in a future release. Use Events.DisableHistoricFilterAPI instead. - DisableHistoricFilterAPI bool `moved:"Events.DisableHistoricFilterAPI" toml:"DisableHistoricFilterAPI,omitempty"` - - // FilterTTL is DEPRECATED and will be removed in a future release. Use Events.FilterTTL instead. - FilterTTL Duration `moved:"Events.FilterTTL" toml:"FilterTTL,omitzero"` - - // MaxFilters is DEPRECATED and will be removed in a future release. Use Events.MaxFilters instead. - MaxFilters int `moved:"Events.MaxFilters" toml:"MaxFilters,omitzero"` - - // MaxFilterResults is DEPRECATED and will be removed in a future release. Use Events.MaxFilterResults instead. - MaxFilterResults int `moved:"Events.MaxFilterResults" toml:"MaxFilterResults,omitzero"` - - // MaxFilterHeightRange is DEPRECATED and will be removed in a future release. Use Events.MaxFilterHeightRange instead. - MaxFilterHeightRange uint64 `moved:"Events.MaxFilterHeightRange" toml:"MaxFilterHeightRange,omitzero"` - - // DatabasePath is DEPRECATED and will be removed in a future release. Use Events.DatabasePath instead. - DatabasePath string `moved:"Events.DatabasePath" toml:"DatabasePath,omitempty"` -} - type EventsConfig struct { - // DisableRealTimeFilterAPI will disable the RealTimeFilterAPI that can create and query filters for actor events as they are emitted. - // The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. - DisableRealTimeFilterAPI bool - - // DisableHistoricFilterAPI will disable the HistoricFilterAPI that can create and query filters for actor events - // that occurred in the past. HistoricFilterAPI maintains a queryable index of events. - // The API is enabled when Fevm.EnableEthRPC or EnableActorEventsAPI is true, but can be disabled selectively with this flag. - DisableHistoricFilterAPI bool - // EnableActorEventsAPI enables the Actor events API that enables clients to consume events // emitted by (smart contracts + built-in Actors). - // This will also enable the RealTimeFilterAPI and HistoricFilterAPI by default, but they can be - // disabled by setting their respective Disable* options. + // Note: Setting this to true will also require that the ChainIndexer is enabled; otherwise, Lotus will report an error at startup. EnableActorEventsAPI bool // FilterTTL specifies the time to live for actor event filters.
Filters that haven't been accessed longer than @@ -615,23 +576,53 @@ type EventsConfig struct { // MaxFilterHeightRange specifies the maximum range of heights that can be used in a filter (to avoid querying // the entire chain) MaxFilterHeightRange uint64 +} - // DatabasePath is the full path to a sqlite database that will be used to index actor events to - // support the historic filter APIs. If the database does not exist it will be created. The directory containing - // the database must already exist and be writeable. If a relative path is provided here, sqlite treats it as - // relative to the CWD (current working directory). - DatabasePath string +type ChainIndexerConfig struct { + // EnableIndexer controls whether the chain indexer is active. + // The chain indexer is responsible for indexing tipsets, messages, and events from the chain state. + // It is a crucial component for optimizing Lotus RPC response times. + // + // Default: false (indexer is disabled) + // + // Setting this to true will enable the indexer, which will significantly improve RPC performance. + // It is strongly recommended to keep this set to true if you are an RPC provider. + EnableIndexer bool - // Others, not implemented yet: - // Set a limit on the number of active websocket subscriptions (may be zero) - // Set a timeout for subscription clients - // Set upper bound on index size -} + // GCRetentionEpochs specifies the number of epochs for which data is retained in the Indexer. + // The garbage collection (GC) process removes data older than this retention period. + // Setting this to 0 disables GC, preserving all historical data indefinitely. + // + // If set, the minimum value must be greater than builtin.EpochsInDay (i.e. "2880" epochs for mainnet). + // This ensures a reasonable retention period for the indexed data. + // + // Default: 0 (GC disabled) + GCRetentionEpochs int64 + + // ReconcileEmptyIndex determines whether to reconcile the index with the chain state + // during startup when the index is empty. + // + // When set to true: + // - On startup, if the index is empty, the indexer will index the available + // chain state on the node albeit within the MaxReconcileTipsets limit. + // + // When set to false: + // - The indexer will not automatically re-index the chain state on startup if the index is empty. + // + // Default: false + // + // Note: The number of tipsets reconciled (i.e. indexed) during this process can be + // controlled using the MaxReconcileTipsets option. + ReconcileEmptyIndex bool -type IndexConfig struct { - // EXPERIMENTAL FEATURE. USE WITH CAUTION - // EnableMsgIndex enables indexing of messages on chain. - EnableMsgIndex bool + // MaxReconcileTipsets limits the number of tipsets to reconcile with the chain during startup. + // It represents the maximum number of tipsets to index from the chain state that are absent in the index. + // + // Default: 3 * epochsPerDay (approximately 3 days of chain history) + // + // Note: Setting this value too low may result in incomplete indexing, while setting it too high + // may increase startup time. 
+ MaxReconcileTipsets uint64 } type HarmonyDB struct { diff --git a/node/impl/full.go b/node/impl/full.go index 6ed0bf3eb11..24240e3df2c 100644 --- a/node/impl/full.go +++ b/node/impl/full.go @@ -36,6 +36,7 @@ type FullNodeAPI struct { full.EthAPI full.ActorEventsAPI full.F3API + full.ChainIndexAPI DS dtypes.MetadataDS NetworkName dtypes.NetworkName diff --git a/node/impl/full/actor_events.go b/node/impl/full/actor_events.go index 4216ef3c6bf..29faae2eb4f 100644 --- a/node/impl/full/actor_events.go +++ b/node/impl/full/actor_events.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/types" ) @@ -283,7 +284,7 @@ func (a *ActorEventHandler) SubscribeActorEventsRaw(ctx context.Context, evtFilt nextBacklogHeightUpdate := a.clock.Now().Add(a.blockDelay) collectEvent := func(ev interface{}) bool { - ce, ok := ev.(*filter.CollectedEvent) + ce, ok := ev.(*index.CollectedEvent) if !ok { log.Errorf("got unexpected value from event filter: %T", ev) return false diff --git a/node/impl/full/actor_events_test.go b/node/impl/full/actor_events_test.go index 16bcfe06ab9..4367f715f81 100644 --- a/node/impl/full/actor_events_test.go +++ b/node/impl/full/actor_events_test.go @@ -19,6 +19,7 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/types" ) @@ -343,7 +344,7 @@ func TestSubscribeActorEventsRaw(t *testing.T) { req.NoError(err) mockChain.setHeaviestTipSet(ts) - var eventsThisEpoch []*filter.CollectedEvent + var eventsThisEpoch []*index.CollectedEvent if thisHeight <= finishHeight { eventsThisEpoch = allEvents[(thisHeight-filterStartHeight)*eventsPerEpoch : (thisHeight-filterStartHeight+2)*eventsPerEpoch] } @@ -541,13 +542,13 @@ type mockFilter struct { id types.FilterID lastTaken time.Time ch chan<- interface{} - historicalEvents []*filter.CollectedEvent + historicalEvents []*index.CollectedEvent subChannelCalls int clearSubChannelCalls int lk sync.Mutex } -func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historicalEvents []*filter.CollectedEvent) *mockFilter { +func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historicalEvents []*index.CollectedEvent) *mockFilter { t.Helper() var id [32]byte _, err := rng.Read(id[:]) @@ -560,7 +561,7 @@ func newMockFilter(ctx context.Context, t *testing.T, rng *pseudo.Rand, historic } } -func (m *mockFilter) sendEventToChannel(e *filter.CollectedEvent) { +func (m *mockFilter) sendEventToChannel(e *index.CollectedEvent) { m.lk.Lock() defer m.lk.Unlock() if m.ch != nil { @@ -614,7 +615,7 @@ func (m *mockFilter) ClearSubChannel() { m.ch = nil } -func (m *mockFilter) TakeCollectedEvents(context.Context) []*filter.CollectedEvent { +func (m *mockFilter) TakeCollectedEvents(context.Context) []*index.CollectedEvent { e := m.historicalEvents m.historicalEvents = nil m.lastTaken = time.Now() @@ -768,7 +769,7 @@ func epochPtr(i int) *abi.ChainEpoch { return &e } -func collectedToActorEvents(collected []*filter.CollectedEvent) []*types.ActorEvent { +func collectedToActorEvents(collected []*index.CollectedEvent) []*types.ActorEvent { var out []*types.ActorEvent for _, c := range collected { out = append(out, &types.ActorEvent{ @@ -783,8 +784,8 @@ func collectedToActorEvents(collected []*filter.CollectedEvent) 
[]*types.ActorEv return out } -func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, eventsPerHeight, eventEndHeight int64) []*filter.CollectedEvent { - var out []*filter.CollectedEvent +func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, eventsPerHeight, eventEndHeight int64) []*index.CollectedEvent { + var out []*index.CollectedEvent for h := eventStartHeight; h <= eventEndHeight; h++ { for i := int64(0); i < eventsPerHeight; i++ { out = append(out, makeCollectedEvent(t, rng, types.NewTipSetKey(mkCid(t, fmt.Sprintf("h=%d", h))), abi.ChainEpoch(h))) @@ -793,11 +794,11 @@ func makeCollectedEvents(t *testing.T, rng *pseudo.Rand, eventStartHeight, event return out } -func makeCollectedEvent(t *testing.T, rng *pseudo.Rand, tsKey types.TipSetKey, height abi.ChainEpoch) *filter.CollectedEvent { +func makeCollectedEvent(t *testing.T, rng *pseudo.Rand, tsKey types.TipSetKey, height abi.ChainEpoch) *index.CollectedEvent { addr, err := address.NewIDAddress(uint64(rng.Int63())) require.NoError(t, err) - return &filter.CollectedEvent{ + return &index.CollectedEvent{ Entries: []types.EventEntry{ {Flags: 0x01, Key: "k1", Codec: cid.Raw, Value: []byte("v1")}, {Flags: 0x01, Key: "k2", Codec: cid.Raw, Value: []byte("v2")}, diff --git a/node/impl/full/chain_index.go b/node/impl/full/chain_index.go new file mode 100644 index 00000000000..09c7a1ce3d3 --- /dev/null +++ b/node/impl/full/chain_index.go @@ -0,0 +1,46 @@ +package full + +import ( + "context" + "errors" + + "go.uber.org/fx" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/index" + "github.com/filecoin-project/lotus/chain/types" +) + +type ChainIndexerAPI interface { + ChainValidateIndex(ctx context.Context, epoch abi.ChainEpoch, backfill bool) (*types.IndexValidation, error) +} + +var ( + _ ChainIndexerAPI = *new(api.FullNode) +) + +type ChainIndexAPI struct { + fx.In + ChainIndexerAPI +} + +type ChainIndexHandler struct { + indexer index.Indexer +} + +func (ch *ChainIndexHandler) ChainValidateIndex(ctx context.Context, epoch abi.ChainEpoch, backfill bool) (*types.IndexValidation, error) { + if ch.indexer == nil { + return nil, errors.New("chain indexer is disabled") + } + return ch.indexer.ChainValidateIndex(ctx, epoch, backfill) +} + +var _ ChainIndexerAPI = (*ChainIndexHandler)(nil) + +func NewChainIndexHandler(indexer index.Indexer) *ChainIndexHandler { + return &ChainIndexHandler{ + indexer: indexer, + } +} diff --git a/node/impl/full/eth.go b/node/impl/full/eth.go index a3164c000ad..0777baf99f3 100644 --- a/node/impl/full/eth.go +++ b/node/impl/full/eth.go @@ -32,8 +32,8 @@ import ( "github.com/filecoin-project/lotus/chain/actors" builtinactors "github.com/filecoin-project/lotus/chain/actors/builtin" builtinevm "github.com/filecoin-project/lotus/chain/actors/builtin/evm" - "github.com/filecoin-project/lotus/chain/ethhashlookup" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -42,15 +42,13 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" ) -var ErrUnsupported = errors.New("unsupported method") - -const maxEthFeeHistoryRewardPercentiles = 100 - var ( - // wait for 3 epochs - eventReadTimeout = 90 * time.Second + ErrUnsupported = errors.New("unsupported method") + 
ErrChainIndexerDisabled = errors.New("chain indexer is disabled; please enable the ChainIndexer to use the ETH RPC API") ) +const maxEthFeeHistoryRewardPercentiles = 100 + type EthModuleAPI interface { EthBlockNumber(ctx context.Context) (ethtypes.EthUint64, error) EthAccounts(ctx context.Context) ([]ethtypes.EthAddress, error) @@ -103,6 +101,7 @@ type EthEventAPI interface { var ( _ EthModuleAPI = *new(api.FullNode) _ EthEventAPI = *new(api.FullNode) + _ EthModuleAPI = *new(api.Gateway) ) @@ -137,13 +136,14 @@ type EthModule struct { Chain *store.ChainStore Mpool *messagepool.MessagePool StateManager *stmgr.StateManager - EthTxHashManager EthTxHashManager EthTraceFilterMaxResults uint64 EthEventHandler *EthEventHandler EthBlkCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] // caches blocks by their CID but blocks only have the transaction hashes EthBlkTxCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] // caches blocks along with full transaction payload by their CID + ChainIndexer index.Indexer + ChainAPI MpoolAPI StateAPI @@ -168,10 +168,10 @@ var _ EthEventAPI = (*EthEventHandler)(nil) type EthAPI struct { fx.In - Chain *store.ChainStore - StateManager *stmgr.StateManager - EthTxHashManager EthTxHashManager - MpoolAPI MpoolAPI + Chain *store.ChainStore + StateManager *stmgr.StateManager + ChainIndexer index.Indexer + MpoolAPI MpoolAPI EthModuleAPI EthEventAPI @@ -369,10 +369,18 @@ func (a *EthModule) EthGetTransactionByHashLimited(ctx context.Context, txHash * if txHash == nil { return nil, nil } + if a.ChainIndexer == nil { + return nil, ErrChainIndexerDisabled + } - c, err := a.EthTxHashManager.GetCidFromHash(*txHash) - if err != nil { - log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + var c cid.Cid + var err error + c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) + if err != nil && errors.Is(err, index.ErrNotFound) { + log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) + } else if err != nil { + log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) + return nil, xerrors.Errorf("failed to lookup transaction hash %s in chain indexer: %w", txHash.String(), err) } // This isn't an eth transaction we have the mapping for, so let's look it up as a filecoin message @@ -428,14 +436,23 @@ func (a *EthModule) EthGetMessageCidByTransactionHash(ctx context.Context, txHas if txHash == nil { return nil, nil } + if a.ChainIndexer == nil { + return nil, ErrChainIndexerDisabled + } - c, err := a.EthTxHashManager.GetCidFromHash(*txHash) - // We fall out of the first condition and continue - if errors.Is(err, ethhashlookup.ErrNotFound) { - log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + var c cid.Cid + var err error + c, err = a.ChainIndexer.GetCidFromHash(ctx, *txHash) + if err != nil && errors.Is(err, index.ErrNotFound) { + log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) } else if err != nil { - return nil, xerrors.Errorf("database error: %w", err) - } else { + log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) + return nil, xerrors.Errorf("failed to lookup transaction hash %s in chain indexer: %w", txHash.String(), err) + } + + if errors.Is(err, index.ErrNotFound) { + log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + } else if a.ChainIndexer != nil { return &c, nil } @@ -524,9 +541,18 @@ func (a *EthModule) EthGetTransactionReceipt(ctx 
context.Context, txHash ethtype } func (a *EthModule) EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*api.EthTxReceipt, error) { - c, err := a.EthTxHashManager.GetCidFromHash(txHash) - if err != nil { - log.Debug("could not find transaction hash %s in lookup table", txHash.String()) + var c cid.Cid + var err error + if a.ChainIndexer == nil { + return nil, ErrChainIndexerDisabled + } + + c, err = a.ChainIndexer.GetCidFromHash(ctx, txHash) + if err != nil && errors.Is(err, index.ErrNotFound) { + log.Debug("could not find transaction hash %s in chain indexer", txHash.String()) + } else if err != nil { + log.Errorf("failed to lookup transaction hash %s in chain indexer: %s", txHash.String(), err) + return nil, xerrors.Errorf("failed to lookup transaction hash %s in chain indexer: %w", txHash.String(), err) } // This isn't an eth transaction we have the mapping for, so let's look it up as a filecoin message @@ -1013,14 +1039,14 @@ func (a *EthModule) EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) } func (a *EthModule) EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) { - return ethSendRawTransaction(ctx, a.MpoolAPI, a.EthTxHashManager, rawTx, false) + return ethSendRawTransaction(ctx, a.MpoolAPI, a.ChainIndexer, rawTx, false) } func (a *EthAPI) EthSendRawTransactionUntrusted(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) { - return ethSendRawTransaction(ctx, a.MpoolAPI, a.EthTxHashManager, rawTx, true) + return ethSendRawTransaction(ctx, a.MpoolAPI, a.ChainIndexer, rawTx, true) } -func ethSendRawTransaction(ctx context.Context, mpool MpoolAPI, ethTxHashManager EthTxHashManager, rawTx ethtypes.EthBytes, untrusted bool) (ethtypes.EthHash, error) { +func ethSendRawTransaction(ctx context.Context, mpool MpoolAPI, indexer index.Indexer, rawTx ethtypes.EthBytes, untrusted bool) (ethtypes.EthHash, error) { txArgs, err := ethtypes.ParseEthTransaction(rawTx) if err != nil { return ethtypes.EmptyEthHash, err @@ -1048,8 +1074,10 @@ func ethSendRawTransaction(ctx context.Context, mpool MpoolAPI, ethTxHashManager // make it immediately available in the transaction hash lookup db, even though it will also // eventually get there via the mpool - if err := ethTxHashManager.UpsertHash(txHash, smsg.Cid()); err != nil { - log.Errorf("error inserting tx mapping to db: %s", err) + if indexer != nil { + if err := indexer.IndexEthTxHash(ctx, txHash, smsg.Cid()); err != nil { + log.Errorf("error indexing tx: %s", err) + } } return ethtypes.EthHashFromTxBytes(rawTx), nil @@ -1661,18 +1689,18 @@ func (e *EthEventHandler) getEthLogsForBlockAndTransaction(ctx context.Context, func (e *EthEventHandler) EthGetLogs(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) { ces, err := e.ethGetEventsForFilter(ctx, filterSpec) if err != nil { - return nil, err + return nil, xerrors.Errorf("failed to get events for filter: %w", err) } return ethFilterResultFromEvents(ctx, ces, e.SubManager.StateAPI) } -func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) ([]*filter.CollectedEvent, error) { +func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec *ethtypes.EthFilterSpec) ([]*index.CollectedEvent, error) { if e.EventFilterManager == nil { return nil, api.ErrNotSupported } - if e.EventFilterManager.EventIndex == nil { - return nil, xerrors.Errorf("cannot use eth_get_logs if historical 
event index is disabled") + if e.EventFilterManager.ChainIndexer == nil { + return nil, ErrChainIndexerDisabled } pf, err := e.parseEthFilterSpec(filterSpec) @@ -1680,104 +1708,37 @@ func (e *EthEventHandler) ethGetEventsForFilter(ctx context.Context, filterSpec return nil, xerrors.Errorf("failed to parse eth filter spec: %w", err) } - if pf.tipsetCid == cid.Undef { - maxHeight := pf.maxHeight - if maxHeight == -1 { - // heaviest tipset doesn't have events because its messages haven't been executed yet - maxHeight = e.Chain.GetHeaviestTipSet().Height() - 1 - } - - if maxHeight < 0 { - return nil, xerrors.Errorf("maxHeight requested is less than 0") - } - - // we can't return events for the heaviest tipset as the transactions in that tipset will be executed - // in the next non null tipset (because of Filecoin's "deferred execution" model) - if maxHeight > e.Chain.GetHeaviestTipSet().Height()-1 { - return nil, xerrors.Errorf("maxHeight requested is greater than the heaviest tipset") - } - - err := e.waitForHeightProcessed(ctx, maxHeight) - if err != nil { - return nil, err - } - // TODO: Ideally we should also check that events for the epoch at `pf.minheight` have been indexed - // However, it is currently tricky to check/guarantee this for two reasons: - // a) Event Index is not aware of null-blocks. This means that the Event Index wont be able to say whether the block at - // `pf.minheight` is a null block or whether it has no events - // b) There can be holes in the index where events at certain epoch simply haven't been indexed because of edge cases around - // node restarts while indexing. This needs a long term "auto-repair"/"automated-backfilling" implementation in the index - // So, for now, the best we can do is ensure that the event index has evenets for events at height >= `pf.maxHeight` - } else { + head := e.Chain.GetHeaviestTipSet() + // should not ask for events for a tipset >= head because of deferred execution + if pf.tipsetCid != cid.Undef { ts, err := e.Chain.GetTipSetByCid(ctx, pf.tipsetCid) if err != nil { return nil, xerrors.Errorf("failed to get tipset by cid: %w", err) } - err = e.waitForHeightProcessed(ctx, ts.Height()) - if err != nil { - return nil, err - } - - b, err := e.EventFilterManager.EventIndex.IsTipsetProcessed(ctx, pf.tipsetCid.Bytes()) - if err != nil { - return nil, xerrors.Errorf("failed to check if tipset events have been indexed: %w", err) + if ts.Height() >= head.Height() { + return nil, xerrors.New("cannot ask for events for a tipset at or greater than head") } - if !b { - return nil, xerrors.Errorf("event index failed to index tipset %s", pf.tipsetCid.String()) - } - } - - // Fill a filter and collect events - f, err := e.EventFilterManager.Fill(ctx, pf.minHeight, pf.maxHeight, pf.tipsetCid, pf.addresses, pf.keys) - if err != nil { - return nil, xerrors.Errorf("failed to install event filter: %w", err) } - ces := f.TakeCollectedEvents(ctx) - - return ces, nil -} -// note that we can have null blocks at the given height and the event Index is not null block aware -// so, what we do here is wait till we see the event index contain a block at a height greater than the given height -func (e *EthEventHandler) waitForHeightProcessed(ctx context.Context, height abi.ChainEpoch) error { - ei := e.EventFilterManager.EventIndex - if height > e.Chain.GetHeaviestTipSet().Height() { - return xerrors.New("height is in the future") + if pf.minHeight >= head.Height() || pf.maxHeight >= head.Height() { + return nil, xerrors.New("cannot ask for events for a tipset 
at or greater than head") } - ctx, cancel := context.WithTimeout(ctx, eventReadTimeout) - defer cancel() - - // if the height we're interested in has already been indexed -> there's nothing to do here - if b, err := ei.IsHeightPast(ctx, uint64(height)); err != nil { - return xerrors.Errorf("failed to check if event index has events for given height: %w", err) - } else if b { - return nil + ef := &index.EventFilter{ + MinHeight: pf.minHeight, + MaxHeight: pf.maxHeight, + TipsetCid: pf.tipsetCid, + Addresses: pf.addresses, + KeysWithCodec: pf.keys, + MaxResults: e.EventFilterManager.MaxFilterResults, } - // subscribe for updates to the event index - subCh, unSubscribeF := ei.SubscribeUpdates() - defer unSubscribeF() - - // it could be that the event index was update while the subscription was being processed -> check if index has what we need now - if b, err := ei.IsHeightPast(ctx, uint64(height)); err != nil { - return xerrors.Errorf("failed to check if event index has events for given height: %w", err) - } else if b { - return nil + ces, err := e.EventFilterManager.ChainIndexer.GetEventsForFilter(ctx, ef) + if err != nil { + return nil, xerrors.Errorf("failed to get events for filter from chain indexer: %w", err) } - for { - select { - case <-subCh: - if b, err := ei.IsHeightPast(ctx, uint64(height)); err != nil { - return xerrors.Errorf("failed to check if event index has events for given height: %w", err) - } else if b { - return nil - } - case <-ctx.Done(): - return ctx.Err() - } - } + return ces, nil } func (e *EthEventHandler) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) { @@ -1903,7 +1864,8 @@ func (e *EthEventHandler) parseEthFilterSpec(filterSpec *ethtypes.EthFilterSpec) tipsetCid = filterSpec.BlockHash.ToCid() } else { var err error - minHeight, maxHeight, err = parseBlockRange(e.Chain.GetHeaviestTipSet().Height(), filterSpec.FromBlock, filterSpec.ToBlock, e.MaxFilterHeightRange) + // Because of deferred execution, we need to subtract 1 from the heaviest tipset height for the "heaviest" parameter + minHeight, maxHeight, err = parseBlockRange(e.Chain.GetHeaviestTipSet().Height()-1, filterSpec.FromBlock, filterSpec.ToBlock, e.MaxFilterHeightRange) if err != nil { return nil, err } diff --git a/node/impl/full/eth_events.go b/node/impl/full/eth_events.go index 0c474b92fe2..850826ecf9c 100644 --- a/node/impl/full/eth_events.go +++ b/node/impl/full/eth_events.go @@ -13,13 +13,14 @@ import ( "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/ethtypes" ) type filterEventCollector interface { - TakeCollectedEvents(context.Context) []*filter.CollectedEvent + TakeCollectedEvents(context.Context) []*index.CollectedEvent } type filterMessageCollector interface { @@ -93,7 +94,7 @@ func ethLogFromEvent(entries []types.EventEntry) (data []byte, topics []ethtypes return data, topics, true } -func ethFilterLogsFromEvents(ctx context.Context, evs []*filter.CollectedEvent, sa StateAPI) ([]ethtypes.EthLog, error) { +func ethFilterLogsFromEvents(ctx context.Context, evs []*index.CollectedEvent, sa StateAPI) ([]ethtypes.EthLog, error) { var logs []ethtypes.EthLog for _, ev := range evs { log := ethtypes.EthLog{ @@ -140,7 +141,7 @@ func ethFilterLogsFromEvents(ctx context.Context, evs []*filter.CollectedEvent, 
return logs, nil } -func ethFilterResultFromEvents(ctx context.Context, evs []*filter.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) { +func ethFilterResultFromEvents(ctx context.Context, evs []*index.CollectedEvent, sa StateAPI) (*ethtypes.EthFilterResult, error) { logs, err := ethFilterLogsFromEvents(ctx, evs, sa) if err != nil { return nil, err @@ -347,8 +348,8 @@ func (e *ethSubscription) start(ctx context.Context) { return case v := <-e.in: switch vt := v.(type) { - case *filter.CollectedEvent: - evs, err := ethFilterResultFromEvents(ctx, []*filter.CollectedEvent{vt}, e.StateAPI) + case *index.CollectedEvent: + evs, err := ethFilterResultFromEvents(ctx, []*index.CollectedEvent{vt}, e.StateAPI) if err != nil { continue } diff --git a/node/impl/full/txhashmanager.go b/node/impl/full/txhashmanager.go deleted file mode 100644 index 00a5980a3fe..00000000000 --- a/node/impl/full/txhashmanager.go +++ /dev/null @@ -1,201 +0,0 @@ -package full - -import ( - "context" - "time" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/crypto" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build/buildconstants" - "github.com/filecoin-project/lotus/chain/ethhashlookup" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/types/ethtypes" -) - -type EthTxHashManager interface { - events.TipSetObserver - - PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) error - ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) - UpsertHash(txHash ethtypes.EthHash, c cid.Cid) error - GetCidFromHash(txHash ethtypes.EthHash) (cid.Cid, error) - DeleteEntriesOlderThan(days int) (int64, error) -} - -var ( - _ EthTxHashManager = (*ethTxHashManager)(nil) - _ EthTxHashManager = (*EthTxHashManagerDummy)(nil) -) - -type ethTxHashManager struct { - stateAPI StateAPI - transactionHashLookup *ethhashlookup.EthTxHashLookup -} - -func NewEthTxHashManager(stateAPI StateAPI, transactionHashLookup *ethhashlookup.EthTxHashLookup) EthTxHashManager { - return ðTxHashManager{ - stateAPI: stateAPI, - transactionHashLookup: transactionHashLookup, - } -} - -func (m *ethTxHashManager) Revert(ctx context.Context, from, to *types.TipSet) error { - return nil -} - -func (m *ethTxHashManager) PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) error { - if minHeight < buildconstants.UpgradeHyggeHeight { - minHeight = buildconstants.UpgradeHyggeHeight - } - - ts := m.stateAPI.Chain.GetHeaviestTipSet() - for ts.Height() > minHeight { - for _, block := range ts.Blocks() { - msgs, err := m.stateAPI.Chain.SecpkMessagesForBlock(ctx, block) - if err != nil { - // If we can't find the messages, we've either imported from snapshot or pruned the store - log.Debug("exiting message mapping population at epoch ", ts.Height()) - return nil - } - - for _, msg := range msgs { - m.ProcessSignedMessage(ctx, msg) - } - } - - var err error - ts, err = m.stateAPI.Chain.GetTipSetFromKey(ctx, ts.Parents()) - if err != nil { - return err - } - } - - return nil -} - -func (m *ethTxHashManager) Apply(ctx context.Context, from, to *types.TipSet) error { - for _, blk := range to.Blocks() { - _, smsgs, err := m.stateAPI.Chain.MessagesForBlock(ctx, blk) - if err != nil { - return err - } - - for _, smsg := range smsgs { - if smsg.Signature.Type != crypto.SigTypeDelegated { - continue - } - - hash, err := 
ethTxHashFromSignedMessage(smsg) - if err != nil { - return err - } - - err = m.transactionHashLookup.UpsertHash(hash, smsg.Cid()) - if err != nil { - return err - } - } - } - - return nil -} - -func (m *ethTxHashManager) UpsertHash(txHash ethtypes.EthHash, c cid.Cid) error { - return m.transactionHashLookup.UpsertHash(txHash, c) -} - -func (m *ethTxHashManager) GetCidFromHash(txHash ethtypes.EthHash) (cid.Cid, error) { - return m.transactionHashLookup.GetCidFromHash(txHash) -} - -func (m *ethTxHashManager) DeleteEntriesOlderThan(days int) (int64, error) { - return m.transactionHashLookup.DeleteEntriesOlderThan(days) -} - -func (m *ethTxHashManager) ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) { - if msg.Signature.Type != crypto.SigTypeDelegated { - return - } - - ethTx, err := ethtypes.EthTransactionFromSignedFilecoinMessage(msg) - if err != nil { - log.Errorf("error converting filecoin message to eth tx: %s", err) - return - } - - txHash, err := ethTx.TxHash() - if err != nil { - log.Errorf("error hashing transaction: %s", err) - return - } - - err = m.UpsertHash(txHash, msg.Cid()) - if err != nil { - log.Errorf("error inserting tx mapping to db: %s", err) - return - } -} - -func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, manager EthTxHashManager) { - for { - select { - case <-ctx.Done(): - return - case u := <-ch: - if u.Type != api.MpoolAdd { - continue - } - - manager.ProcessSignedMessage(ctx, u.Message) - } - } -} - -func EthTxHashGC(ctx context.Context, retentionDays int, manager EthTxHashManager) { - if retentionDays == 0 { - return - } - - gcPeriod := 1 * time.Hour - for { - entriesDeleted, err := manager.DeleteEntriesOlderThan(retentionDays) - if err != nil { - log.Errorf("error garbage collecting eth transaction hash database: %s", err) - } - log.Info("garbage collection run on eth transaction hash lookup database. 
%d entries deleted", entriesDeleted) - time.Sleep(gcPeriod) - } -} - -type EthTxHashManagerDummy struct{} - -func (d *EthTxHashManagerDummy) PopulateExistingMappings(ctx context.Context, minHeight abi.ChainEpoch) error { - return nil -} - -func (d *EthTxHashManagerDummy) Revert(ctx context.Context, from, to *types.TipSet) error { - return nil -} - -func (d *EthTxHashManagerDummy) Apply(ctx context.Context, from, to *types.TipSet) error { - return nil -} - -func (d *EthTxHashManagerDummy) ProcessSignedMessage(ctx context.Context, msg *types.SignedMessage) {} - -func (d *EthTxHashManagerDummy) UpsertHash(txHash ethtypes.EthHash, c cid.Cid) error { - return nil -} - -func (d *EthTxHashManagerDummy) GetCidFromHash(txHash ethtypes.EthHash) (cid.Cid, error) { - return cid.Undef, nil -} - -func (d *EthTxHashManagerDummy) DeleteEntriesOlderThan(days int) (int64, error) { - return 0, nil -} diff --git a/node/modules/actorevent.go b/node/modules/actorevent.go index 3b02be1c4e0..a77ae271a63 100644 --- a/node/modules/actorevent.go +++ b/node/modules/actorevent.go @@ -2,11 +2,9 @@ package modules import ( "context" - "path/filepath" "time" "go.uber.org/fx" - "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" @@ -14,6 +12,7 @@ import ( "github.com/filecoin-project/lotus/build/buildconstants" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/events/filter" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -43,7 +42,7 @@ func EthEventHandler(cfg config.EventsConfig, enableEthRPC bool) func(helpers.Me SubscribtionCtx: ctx, } - if !enableEthRPC || cfg.DisableRealTimeFilterAPI { + if !enableEthRPC { // all event functionality is disabled // the historic filter API relies on the real time one return ee, nil @@ -95,40 +94,17 @@ func EthEventHandler(cfg config.EventsConfig, enableEthRPC bool) func(helpers.Me } } -func EventFilterManager(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, full.ChainAPI) (*filter.EventFilterManager, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, chainapi full.ChainAPI) (*filter.EventFilterManager, error) { +func EventFilterManager(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, + *stmgr.StateManager, EventHelperAPI, full.ChainAPI, index.Indexer) (*filter.EventFilterManager, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, + evapi EventHelperAPI, chainapi full.ChainAPI, ci index.Indexer) (*filter.EventFilterManager, error) { ctx := helpers.LifecycleCtx(mctx, lc) // Enable indexing of actor events - var eventIndex *filter.EventIndex - if !cfg.DisableHistoricFilterAPI { - var dbPath string - if cfg.DatabasePath == "" { - sqlitePath, err := r.SqlitePath() - if err != nil { - return nil, xerrors.Errorf("failed to resolve event index database path: %w", err) - } - dbPath = filepath.Join(sqlitePath, filter.DefaultDbFilename) - } else { - dbPath = cfg.DatabasePath - } - - var err error - eventIndex, err = filter.NewEventIndex(ctx, dbPath, chainapi.Chain) - if err != nil { - return nil, xerrors.Errorf("failed to 
initialize event index database: %w", err) - } - - lc.Append(fx.Hook{ - OnStop: func(context.Context) error { - return eventIndex.Close() - }, - }) - } fm := &filter.EventFilterManager{ - ChainStore: cs, - EventIndex: eventIndex, // will be nil unless EnableHistoricFilterAPI is true + ChainStore: cs, + ChainIndexer: ci, // TODO: // We don't need this address resolution anymore once https://github.com/filecoin-project/lotus/issues/11594 lands AddressResolver: func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { @@ -165,7 +141,7 @@ func EventFilterManager(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.L func ActorEventHandler(cfg config.EventsConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *filter.EventFilterManager, *store.ChainStore, *stmgr.StateManager, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI) (*full.ActorEventHandler, error) { return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, fm *filter.EventFilterManager, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI) (*full.ActorEventHandler, error) { - if !cfg.EnableActorEventsAPI || cfg.DisableRealTimeFilterAPI { + if !cfg.EnableActorEventsAPI { return full.NewActorEventHandler( cs, nil, // no EventFilterManager disables API calls diff --git a/node/modules/chain.go b/node/modules/chain.go index d6779a6305a..cf088283ea5 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -21,7 +21,6 @@ import ( "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/gen/slashfilter" - "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -124,7 +123,7 @@ func NetworkName(mctx helpers.MetricsCtx, ctx := helpers.LifecycleCtx(mctx, lc) - sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil, nil, index.DummyMsgIndex) + sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil, nil, nil) if err != nil { return "", err } diff --git a/node/modules/chainindex.go b/node/modules/chainindex.go new file mode 100644 index 00000000000..d2307a77600 --- /dev/null +++ b/node/modules/chainindex.go @@ -0,0 +1,134 @@ +package modules + +import ( + "context" + "path/filepath" + + "go.uber.org/fx" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/index" + "github.com/filecoin-project/lotus/chain/messagepool" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/impl/full" + "github.com/filecoin-project/lotus/node/modules/helpers" + "github.com/filecoin-project/lotus/node/repo" +) + +func ChainIndexer(cfg config.ChainIndexerConfig) func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (index.Indexer, error) { + return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (index.Indexer, error) { + if !cfg.EnableIndexer { + log.Infof("ChainIndexer is 
disabled") + return nil, nil + } + + chainIndexPath, err := r.ChainIndexPath() + if err != nil { + return nil, err + } + + dbPath := filepath.Join(chainIndexPath, index.DefaultDbFilename) + chainIndexer, err := index.NewSqliteIndexer(dbPath, cs, cfg.GCRetentionEpochs, cfg.ReconcileEmptyIndex, cfg.MaxReconcileTipsets) + if err != nil { + return nil, err + } + + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return chainIndexer.Close() + }, + }) + + return chainIndexer, nil + } +} + +func InitChainIndexer(lc fx.Lifecycle, mctx helpers.MetricsCtx, indexer index.Indexer, + evapi EventHelperAPI, mp *messagepool.MessagePool, sm *stmgr.StateManager) { + ctx := helpers.LifecycleCtx(mctx, lc) + + lc.Append(fx.Hook{ + OnStart: func(_ context.Context) error { + indexer.SetActorToDelegatedAddresFunc(func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool) { + idAddr, err := address.NewIDAddress(uint64(emitter)) + if err != nil { + return address.Undef, false + } + + actor, err := sm.LoadActor(ctx, idAddr, ts) + if err != nil || actor.DelegatedAddress == nil { + return idAddr, true + } + + return *actor.DelegatedAddress, true + }) + + indexer.SetRecomputeTipSetStateFunc(func(ctx context.Context, ts *types.TipSet) error { + _, _, err := sm.RecomputeTipSetState(ctx, ts) + return err + }) + + ch, err := mp.Updates(ctx) + if err != nil { + return err + } + go WaitForMpoolUpdates(ctx, ch, indexer) + + ev, err := events.NewEvents(ctx, &evapi) + if err != nil { + return err + } + + // Tipset listener + + // `ObserveAndBlock` returns the current head and guarantees that it will call the observer with all future tipsets + head, unlockObserver, err := ev.ObserveAndBlock(indexer) + if err != nil { + return xerrors.Errorf("error while observing tipsets: %w", err) + } + if err := indexer.ReconcileWithChain(ctx, head); err != nil { + unlockObserver() + return xerrors.Errorf("error while reconciling chain index with chain state: %w", err) + } + unlockObserver() + + indexer.Start() + + return nil + }, + }) +} + +func ChainIndexHandler(cfg config.ChainIndexerConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, index.Indexer) (*full.ChainIndexHandler, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, indexer index.Indexer) (*full.ChainIndexHandler, error) { + return full.NewChainIndexHandler(indexer), nil + } +} + +func WaitForMpoolUpdates(ctx context.Context, ch <-chan api.MpoolUpdate, indexer index.Indexer) { + for ctx.Err() == nil { + select { + case <-ctx.Done(): + return + case u := <-ch: + if u.Type != api.MpoolAdd { + continue + } + if u.Message == nil { + continue + } + err := indexer.IndexSignedMessage(ctx, u.Message) + if err != nil { + log.Errorw("failed to index signed Mpool message", "error", err) + } + } + } +} diff --git a/node/modules/ethmodule.go b/node/modules/ethmodule.go index d701cfb0c14..61d957b7fad 100644 --- a/node/modules/ethmodule.go +++ b/node/modules/ethmodule.go @@ -1,9 +1,6 @@ package modules import ( - "context" - "os" - "path/filepath" "time" "github.com/hashicorp/golang-lru/arc/v2" @@ -13,8 +10,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/ethhashlookup" - "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/index" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" @@ -25,40 +21,11 @@ import ( 
"github.com/filecoin-project/lotus/node/repo" ) -func EthTxHashManager(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.SyncAPI) (full.EthTxHashManager, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, evapi EventHelperAPI, mp *messagepool.MessagePool, stateapi full.StateAPI, syncapi full.SyncAPI) (full.EthTxHashManager, error) { - ctx := helpers.LifecycleCtx(mctx, lc) - - sqlitePath, err := r.SqlitePath() - if err != nil { - return nil, err - } - - dbPath := filepath.Join(sqlitePath, ethhashlookup.DefaultDbFilename) - - // Check if the db exists, if not, we'll back-fill some entries - _, err = os.Stat(dbPath) - dbAlreadyExists := err == nil - - transactionHashLookup, err := ethhashlookup.NewTransactionHashLookup(ctx, dbPath) - if err != nil { - return nil, err - } - - lc.Append(fx.Hook{ - OnStop: func(ctx context.Context) error { - return transactionHashLookup.Close() - }, - }) - - ethTxHashManager := full.NewEthTxHashManager(stateapi, transactionHashLookup) - - if !dbAlreadyExists { - err = ethTxHashManager.PopulateExistingMappings(mctx, 0) - if err != nil { - return nil, err - } - } +func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, + EventHelperAPI, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI, *full.EthEventHandler, index.Indexer) (*full.EthModule, error) { + return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, evapi EventHelperAPI, + mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI, + ethEventHandler *full.EthEventHandler, chainIndexer index.Indexer) (*full.EthModule, error) { // prefill the whole skiplist cache maintained internally by the GetTipsetByHeight go func() { @@ -71,37 +38,9 @@ func EthTxHashManager(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.Locke log.Infof("Prefilling GetTipsetByHeight done in %s", time.Since(start)) }() - lc.Append(fx.Hook{ - OnStart: func(context.Context) error { - ev, err := events.NewEvents(ctx, &evapi) - if err != nil { - return err - } - - // Tipset listener - _ = ev.Observe(ethTxHashManager) - - ch, err := mp.Updates(ctx) - if err != nil { - return err - } - go full.WaitForMpoolUpdates(ctx, ch, ethTxHashManager) - go full.EthTxHashGC(ctx, cfg.EthTxHashMappingLifetimeDays, ethTxHashManager) - - return nil - }, - }) - - return ethTxHashManager, nil - } -} - -func EthModuleAPI(cfg config.FevmConfig) func(helpers.MetricsCtx, repo.LockedRepo, fx.Lifecycle, *store.ChainStore, *stmgr.StateManager, *messagepool.MessagePool, full.StateAPI, full.ChainAPI, full.MpoolAPI, full.SyncAPI, *full.EthEventHandler, full.EthTxHashManager) (*full.EthModule, error) { - return func(mctx helpers.MetricsCtx, r repo.LockedRepo, lc fx.Lifecycle, cs *store.ChainStore, sm *stmgr.StateManager, mp *messagepool.MessagePool, stateapi full.StateAPI, chainapi full.ChainAPI, mpoolapi full.MpoolAPI, syncapi full.SyncAPI, ethEventHandler *full.EthEventHandler, ethTxHashManager full.EthTxHashManager) (*full.EthModule, error) { - + var err error var blkCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] var blkTxCache *arc.ARCCache[cid.Cid, *ethtypes.EthBlock] - var err error if cfg.EthBlkCacheSize > 0 { blkCache, err = arc.NewARC[cid.Cid, 
diff --git a/node/modules/msgindex.go b/node/modules/msgindex.go
deleted file mode 100644
index 423be65d1b7..00000000000
--- a/node/modules/msgindex.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package modules
-
-import (
-	"context"
-	"path/filepath"
-
-	"go.uber.org/fx"
-
-	"github.com/filecoin-project/lotus/chain/index"
-	"github.com/filecoin-project/lotus/chain/store"
-	"github.com/filecoin-project/lotus/node/modules/helpers"
-	"github.com/filecoin-project/lotus/node/repo"
-)
-
-func MsgIndex(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, r repo.LockedRepo) (index.MsgIndex, error) {
-	basePath, err := r.SqlitePath()
-	if err != nil {
-		return nil, err
-	}
-
-	msgIndex, err := index.NewMsgIndex(helpers.LifecycleCtx(mctx, lc), filepath.Join(basePath, index.DefaultDbFilename), cs)
-	if err != nil {
-		return nil, err
-	}
-
-	lc.Append(fx.Hook{
-		OnStop: func(_ context.Context) error {
-			return msgIndex.Close()
-		},
-	})
-
-	return msgIndex, nil
-}
-
-func DummyMsgIndex() index.MsgIndex {
-	return index.DummyMsgIndex
-}
diff --git a/node/modules/stmgr.go b/node/modules/stmgr.go
index f3eaee219c5..d07edba1a2b 100644
--- a/node/modules/stmgr.go
+++ b/node/modules/stmgr.go
@@ -11,8 +11,8 @@ import (
 	"github.com/filecoin-project/lotus/node/modules/dtypes"
 )
 
-func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS, msgIndex index.MsgIndex) (*stmgr.StateManager, error) {
-	sm, err := stmgr.NewStateManager(cs, exec, sys, us, b, metadataDs, msgIndex)
+func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule, metadataDs dtypes.MetadataDS, chainIndexer index.Indexer) (*stmgr.StateManager, error) {
+	sm, err := stmgr.NewStateManager(cs, exec, sys, us, b, metadataDs, chainIndexer)
 	if err != nil {
 		return nil, err
 	}
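With `MsgIndex` and its dummy provider gone, these modules depend on the single `index.Indexer` interface. For orientation, the subset of that interface exercised by this patch, with signatures transcribed from the call sites above (the authoritative definition lives in `chain/index` and also covers the tipset-observer methods that `ObserveAndBlock` relies on):

// Reference sketch only; see chain/index for the real interface.
type indexerAsUsedHere interface {
	Start()
	Close() error
	IndexSignedMessage(ctx context.Context, msg *types.SignedMessage) error
	ReconcileWithChain(ctx context.Context, head *types.TipSet) error
	SetActorToDelegatedAddresFunc(f func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool))
	SetRecomputeTipSetStateFunc(f func(ctx context.Context, ts *types.TipSet) error)
}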
diff --git a/node/repo/fsrepo.go b/node/repo/fsrepo.go
index 26cbbd6b135..1c2e9f738d4 100644
--- a/node/repo/fsrepo.go
+++ b/node/repo/fsrepo.go
@@ -37,7 +37,7 @@ const (
 	fsDatastore = "datastore"
 	fsLock      = "repo.lock"
 	fsKeystore  = "keystore"
-	fsSqlite    = "sqlite"
+	fsChainIndex = "chainindex"
 )
 
 func NewRepoTypeFromString(t string) RepoType {
@@ -376,9 +376,9 @@ type fsLockedRepo struct {
 	ssErr  error
 	ssOnce sync.Once
 
-	sqlPath string
-	sqlErr  error
-	sqlOnce sync.Once
+	chainIndexPath string
+	chainIndexErr  error
+	chainIndexOnce sync.Once
 
 	storageLk sync.Mutex
 	configLk  sync.Mutex
@@ -473,19 +473,19 @@ func (fsr *fsLockedRepo) SplitstorePath() (string, error) {
 	return fsr.ssPath, fsr.ssErr
 }
 
-func (fsr *fsLockedRepo) SqlitePath() (string, error) {
-	fsr.sqlOnce.Do(func() {
-		path := fsr.join(fsSqlite)
+func (fsr *fsLockedRepo) ChainIndexPath() (string, error) {
+	fsr.chainIndexOnce.Do(func() {
+		path := fsr.join(fsChainIndex)
 		if err := os.MkdirAll(path, 0755); err != nil {
-			fsr.sqlErr = err
+			fsr.chainIndexErr = err
 			return
 		}
-		fsr.sqlPath = path
+		fsr.chainIndexPath = path
 	})
-	return fsr.sqlPath, fsr.sqlErr
+	return fsr.chainIndexPath, fsr.chainIndexErr
 }
 
 // join joins path elements with fsr.path
diff --git a/node/repo/interface.go b/node/repo/interface.go
index 11c965bf55c..100d0dc58d5 100644
--- a/node/repo/interface.go
+++ b/node/repo/interface.go
@@ -69,8 +69,8 @@ type LockedRepo interface {
 	// SplitstorePath returns the path for the SplitStore
 	SplitstorePath() (string, error)
 
-	// SqlitePath returns the path for the Sqlite database
-	SqlitePath() (string, error)
+	// ChainIndexPath returns the path for the chain index database
+	ChainIndexPath() (string, error)
 
 	// Returns config in this repo
 	Config() (interface{}, error)
diff --git a/node/repo/memrepo.go b/node/repo/memrepo.go
index d1e9b214b4a..cda00f985f2 100644
--- a/node/repo/memrepo.go
+++ b/node/repo/memrepo.go
@@ -268,12 +268,12 @@ func (lmem *lockedMemRepo) SplitstorePath() (string, error) {
 	return splitstorePath, nil
 }
 
-func (lmem *lockedMemRepo) SqlitePath() (string, error) {
-	sqlitePath := filepath.Join(lmem.Path(), "sqlite")
-	if err := os.MkdirAll(sqlitePath, 0755); err != nil {
+func (lmem *lockedMemRepo) ChainIndexPath() (string, error) {
+	chainIndexPath := filepath.Join(lmem.Path(), "chainindex")
+	if err := os.MkdirAll(chainIndexPath, 0755); err != nil {
 		return "", err
 	}
-	return sqlitePath, nil
+	return chainIndexPath, nil
 }
 
 func (lmem *lockedMemRepo) ListDatastores(ns string) ([]int64, error) {
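The net effect of the three repo changes above is a rename of the on-disk index directory. Illustrative paths relative to a repo root such as ~/.lotus; per the changelog entry, nothing is migrated automatically between the two locations:

// Sketch: old and new database locations under the repo root.
oldDir := filepath.Join(repoPath, "sqlite")     // the per-subsystem message/txhash/event databases lived here
newDir := filepath.Join(repoPath, "chainindex") // the single ChainIndexer database (index.DefaultDbFilename) lives here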