diff --git a/.vscode/launch.json b/.vscode/launch.json
index 0ad80e0d..2860f61e 100644
--- a/.vscode/launch.json
+++ b/.vscode/launch.json
@@ -14,6 +14,7 @@
       ],
       "outputCapture": "std",
      "internalConsoleOptions": "openOnSessionStart",
+      "envFile": "${workspaceFolder}/.env",
       "env": {
         "NODE_ENV": "development",
         "TS_NODE_SKIP_IGNORE": "true"
@@ -33,6 +34,7 @@
       ],
       "outputCapture": "std",
       "internalConsoleOptions": "openOnSessionStart",
+      "envFile": "${workspaceFolder}/.env",
       "env": {
         "NODE_ENV": "development",
         "TS_NODE_SKIP_IGNORE": "true",
@@ -53,6 +55,7 @@
       ],
       "outputCapture": "std",
       "internalConsoleOptions": "openOnSessionStart",
+      "envFile": "${workspaceFolder}/.env",
       "env": {
         "NODE_ENV": "development",
         "TS_NODE_SKIP_IGNORE": "true",
@@ -60,6 +63,25 @@
       },
       "killBehavior": "polite",
     },
+    {
+      "type": "node",
+      "request": "launch",
+      "name": "Run: debug server",
+      "runtimeArgs": [
+        "-r",
+        "ts-node/register"
+      ],
+      "args": [
+        "${workspaceFolder}/util/debug-server.ts"
+      ],
+      "outputCapture": "std",
+      "internalConsoleOptions": "openOnSessionStart",
+      "env": {
+        "NODE_ENV": "development",
+        "TS_NODE_SKIP_IGNORE": "true",
+      },
+      "killBehavior": "polite",
+    },
     {
       "type": "node",
       "request": "launch",
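The `envFile` entries above make each launch configuration load a local `.env` file before start. For reference, a minimal sketch of such a file, using only variables that `src/env.ts` declares in this change (all values are placeholders):

```
# Postgres connection settings read by src/env.ts
PGHOST=localhost
PGPORT=5432
# Optional after this change; omit to skip event header validation
ORDHOOK_NODE_AUTH_TOKEN=dev-token
# 'default' for streaming ingestion, 'replay' for bulk block replays
ORDHOOK_INGESTION_MODE=default
```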
diff --git a/CHANGELOG.md b/CHANGELOG.md
index cc0219a7..f41dc0c2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,77 @@
+## [4.0.4](https://github.com/hirosystems/ordinals-api/compare/v4.0.3...v4.0.4) (2024-06-17)
+
+
+### Bug Fixes
+
+* select correct block hash upon migration ([#366](https://github.com/hirosystems/ordinals-api/issues/366)) ([aea1a19](https://github.com/hirosystems/ordinals-api/commit/aea1a19463e82e7fa1f2574df0730a2626eeacc4))
+
+## [4.0.3](https://github.com/hirosystems/ordinals-api/compare/v4.0.2...v4.0.3) (2024-06-17)
+
+
+### Bug Fixes
+
+* add output to current locations and block_hash to inscriptions ([#365](https://github.com/hirosystems/ordinals-api/issues/365)) ([3d75deb](https://github.com/hirosystems/ordinals-api/commit/3d75deb664906eb1c4392ab18d5c77332047a875))
+
+## [4.0.2](https://github.com/hirosystems/ordinals-api/compare/v4.0.1...v4.0.2) (2024-06-16)
+
+
+### Bug Fixes
+
+* optimize brc-20 activity query ([#362](https://github.com/hirosystems/ordinals-api/issues/362)) ([e8a4fea](https://github.com/hirosystems/ordinals-api/commit/e8a4fea29a49cf55bd2291c2bf8349a756130028))
+
+## [4.0.1](https://github.com/hirosystems/ordinals-api/compare/v4.0.0...v4.0.1) (2024-06-16)
+
+
+### Bug Fixes
+
+* optimize base inscriptions query ([#364](https://github.com/hirosystems/ordinals-api/issues/364)) ([cbcc830](https://github.com/hirosystems/ordinals-api/commit/cbcc830a675e5c4dcd29650f1d5e8783ce096989))
+
+## [4.0.0](https://github.com/hirosystems/ordinals-api/compare/v3.1.0...v4.0.0) (2024-06-11)
+
+
+### ⚠ BREAKING CHANGES
+
+* support reinscription transfers (#348)
+* ingest BRC20 data from ordhook (#347)
+
+### Features
+
+* ingest BRC20 data from ordhook ([#347](https://github.com/hirosystems/ordinals-api/issues/347)) ([56a8851](https://github.com/hirosystems/ordinals-api/commit/56a88518b1ffe549524941e4d94d6347d11c98f3))
+* return `parent` and `metadata` in inscription responses ([#350](https://github.com/hirosystems/ordinals-api/issues/350)) ([939286e](https://github.com/hirosystems/ordinals-api/commit/939286e3a036063835206c533ce7f9a66cee0ba7))
+* support reinscription transfers ([#348](https://github.com/hirosystems/ordinals-api/issues/348)) ([5422156](https://github.com/hirosystems/ordinals-api/commit/5422156e9919f0c5870c9571ea9f591852c98b69))
+
+
+### Bug Fixes
+
+* detect block gaps when streaming from ordhook ([#349](https://github.com/hirosystems/ordinals-api/issues/349)) ([3c1480f](https://github.com/hirosystems/ordinals-api/commit/3c1480f5bfb8bec4993fffd50245c345d71cdf08))
+* ordhook ingestion ([#356](https://github.com/hirosystems/ordinals-api/issues/356)) ([dfc003e](https://github.com/hirosystems/ordinals-api/commit/dfc003ee65198a35bac1e0fc723068bde90c63c4))
+
+## [4.0.0-beta.2](https://github.com/hirosystems/ordinals-api/compare/v4.0.0-beta.1...v4.0.0-beta.2) (2024-06-11)
+
+
+### Bug Fixes
+
+* ordhook ingestion ([#356](https://github.com/hirosystems/ordinals-api/issues/356)) ([dfc003e](https://github.com/hirosystems/ordinals-api/commit/dfc003ee65198a35bac1e0fc723068bde90c63c4))
+
+## [4.0.0-beta.1](https://github.com/hirosystems/ordinals-api/compare/v3.1.0...v4.0.0-beta.1) (2024-04-26)
+
+
+### ⚠ BREAKING CHANGES
+
+* support reinscription transfers (#348)
+* ingest BRC20 data from ordhook (#347)
+
+### Features
+
+* ingest BRC20 data from ordhook ([#347](https://github.com/hirosystems/ordinals-api/issues/347)) ([56a8851](https://github.com/hirosystems/ordinals-api/commit/56a88518b1ffe549524941e4d94d6347d11c98f3))
+* return `parent` and `metadata` in inscription responses ([#350](https://github.com/hirosystems/ordinals-api/issues/350)) ([939286e](https://github.com/hirosystems/ordinals-api/commit/939286e3a036063835206c533ce7f9a66cee0ba7))
+* support reinscription transfers ([#348](https://github.com/hirosystems/ordinals-api/issues/348)) ([5422156](https://github.com/hirosystems/ordinals-api/commit/5422156e9919f0c5870c9571ea9f591852c98b69))
+
+
+### Bug Fixes
+
+* detect block gaps when streaming from ordhook ([#349](https://github.com/hirosystems/ordinals-api/issues/349)) ([3c1480f](https://github.com/hirosystems/ordinals-api/commit/3c1480f5bfb8bec4993fffd50245c345d71cdf08))
+
 ## [3.1.0](https://github.com/hirosystems/ordinals-api/compare/v3.0.1...v3.1.0) (2024-04-23)
diff --git a/migrations/1676395230930_inscriptions.ts b/migrations/1676395230930_inscriptions.ts
index cbb66d07..d8b216cb 100644
--- a/migrations/1676395230930_inscriptions.ts
+++ b/migrations/1676395230930_inscriptions.ts
@@ -31,6 +31,7 @@ export function up(pgm: MigrationBuilder): void {
     },
     address: {
       type: 'text',
+      notNull: true,
     },
     mime_type: {
       type: 'text',
@@ -89,11 +90,6 @@ export function up(pgm: MigrationBuilder): void {
     },
   });
   pgm.createConstraint('inscriptions', 'inscriptions_number_unique', 'UNIQUE(number)');
-  pgm.createConstraint(
-    'inscriptions',
-    'inscriptions_ordinal_number_fk',
-    'FOREIGN KEY(ordinal_number) REFERENCES satoshis(ordinal_number) ON DELETE CASCADE'
-  );
   pgm.createIndex('inscriptions', ['mime_type']);
   pgm.createIndex('inscriptions', ['recursive']);
   pgm.createIndex('inscriptions', [
@@ -102,4 +98,5 @@ export function up(pgm: MigrationBuilder): void {
   ]);
   pgm.createIndex('inscriptions', ['address']);
   pgm.createIndex('inscriptions', [{ name: 'updated_at', sort: 'DESC' }]);
+  pgm.createIndex('inscriptions', ['ordinal_number']);
 }
diff --git a/migrations/1677284495299_locations.ts b/migrations/1677284495299_locations.ts
index 30894492..3cdcc48d 100644
--- a/migrations/1677284495299_locations.ts
+++ b/migrations/1677284495299_locations.ts
@@ -28,6 +28,7 @@ export function up(pgm: MigrationBuilder): void {
     },
     address: {
       type: 'text',
+      notNull: true,
     },
     output: {
       type: 'text',
@@ -57,11 +58,6 @@ export function up(pgm: MigrationBuilder): void {
   pgm.createConstraint('locations', 'locations_pkey', {
     primaryKey: ['ordinal_number', 'block_height', 'tx_index'],
   });
-  pgm.createConstraint(
-    'locations',
-    'locations_ordinal_number_fk',
-    'FOREIGN KEY(ordinal_number) REFERENCES satoshis(ordinal_number) ON DELETE CASCADE'
-  );
   pgm.createIndex('locations', ['output', 'offset']);
   pgm.createIndex('locations', ['timestamp']);
   pgm.createIndex('locations', [
diff --git a/migrations/1677284495500_current-locations.ts b/migrations/1677284495500_current-locations.ts
index 51f4b8a3..8da71549 100644
--- a/migrations/1677284495500_current-locations.ts
+++ b/migrations/1677284495500_current-locations.ts
@@ -19,18 +19,10 @@ export function up(pgm: MigrationBuilder): void {
     },
     address: {
       type: 'text',
+      notNull: true,
     },
   });
-  pgm.createConstraint(
-    'current_locations',
-    'current_locations_locations_fk',
-    'FOREIGN KEY(ordinal_number, block_height, tx_index) REFERENCES locations(ordinal_number, block_height, tx_index) ON DELETE CASCADE'
-  );
-  pgm.createConstraint(
-    'locations',
-    'locations_satoshis_fk',
-    'FOREIGN KEY(ordinal_number) REFERENCES satoshis(ordinal_number) ON DELETE CASCADE'
-  );
   pgm.createIndex('current_locations', ['ordinal_number'], { unique: true });
   pgm.createIndex('current_locations', ['address']);
+  pgm.createIndex('current_locations', ['block_height', 'tx_index']);
 }
diff --git a/migrations/1677284495501_inscription-transfers.ts b/migrations/1677284495501_inscription-transfers.ts
index 90b72717..648ef662 100644
--- a/migrations/1677284495501_inscription-transfers.ts
+++ b/migrations/1677284495501_inscription-transfers.ts
@@ -37,16 +37,6 @@ export function up(pgm: MigrationBuilder): void {
   pgm.createConstraint('inscription_transfers', 'inscription_transfers_pkey', {
     primaryKey: ['block_height', 'block_transfer_index'],
   });
-  pgm.createConstraint(
-    'inscription_transfers',
-    'inscription_transfers_locations_fk',
-    'FOREIGN KEY(ordinal_number, block_height, tx_index) REFERENCES locations(ordinal_number, block_height, tx_index) ON DELETE CASCADE'
-  );
-  pgm.createConstraint(
-    'inscription_transfers',
-    'inscription_transfers_satoshis_fk',
-    'FOREIGN KEY(ordinal_number) REFERENCES satoshis(ordinal_number) ON DELETE CASCADE'
-  );
   pgm.createIndex('inscription_transfers', ['genesis_id']);
   pgm.createIndex('inscription_transfers', ['number']);
 }
diff --git a/migrations/1718498685557_inscriptions-block-hash.ts b/migrations/1718498685557_inscriptions-block-hash.ts
new file mode 100644
index 00000000..8ad33f05
--- /dev/null
+++ b/migrations/1718498685557_inscriptions-block-hash.ts
@@ -0,0 +1,26 @@
+/* eslint-disable @typescript-eslint/naming-convention */
+import { MigrationBuilder, ColumnDefinitions } from 'node-pg-migrate';
+
+export const shorthands: ColumnDefinitions | undefined = undefined;
+
+export function up(pgm: MigrationBuilder): void {
+  pgm.addColumn('inscriptions', {
+    block_hash: {
+      type: 'text',
+    },
+  });
+  pgm.sql(`
+    UPDATE inscriptions SET block_hash = (
+      SELECT block_hash FROM locations AS l
+      WHERE l.ordinal_number = inscriptions.ordinal_number
+      AND l.block_height = inscriptions.block_height
+      AND l.tx_index = inscriptions.tx_index
+      LIMIT 1
+    )
+  `);
+  pgm.alterColumn('inscriptions', 'block_hash', { notNull: true });
+}
+
+export function down(pgm: MigrationBuilder): void {
+  pgm.dropColumn('inscriptions', 'block_hash');
+}
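A note on the backfill in the migration above and the one that follows: inside a correlated subquery, Postgres resolves an unqualified column name against the innermost `FROM` first, so a predicate like `WHERE l.ordinal_number = ordinal_number` compares `locations` to itself, matches every row, and lets `LIMIT 1` pick an arbitrary `block_hash`. The outer table has to be qualified explicitly, which is what the `inscriptions.` and `current_locations.` prefixes in these two statements do. A minimal sketch of the pitfall, using hypothetical tables `a` and `b`:

```typescript
// WRONG: `id` resolves to b.id, so the filter is always true and every row
// of `a` receives an arbitrary row's value from `b`.
pgm.sql(`UPDATE a SET val = (SELECT val FROM b WHERE b.id = id LIMIT 1)`);

// RIGHT: qualify the outer table so each row of `a` gets its matching value.
pgm.sql(`UPDATE a SET val = (SELECT val FROM b WHERE b.id = a.id LIMIT 1)`);
```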
diff --git a/migrations/1718498703916_current-locations-output.ts b/migrations/1718498703916_current-locations-output.ts
new file mode 100644
index 00000000..04edfe62
--- /dev/null
+++ b/migrations/1718498703916_current-locations-output.ts
@@ -0,0 +1,26 @@
+/* eslint-disable @typescript-eslint/naming-convention */
+import { MigrationBuilder, ColumnDefinitions } from 'node-pg-migrate';
+
+export const shorthands: ColumnDefinitions | undefined = undefined;
+
+export function up(pgm: MigrationBuilder): void {
+  pgm.addColumn('current_locations', {
+    output: {
+      type: 'text',
+    },
+  });
+  pgm.sql(`
+    UPDATE current_locations SET output = (
+      SELECT output FROM locations AS l
+      WHERE l.ordinal_number = current_locations.ordinal_number
+      AND l.block_height = current_locations.block_height
+      AND l.tx_index = current_locations.tx_index
+      LIMIT 1
+    )
+  `);
+  pgm.alterColumn('current_locations', 'output', { notNull: true });
+}
+
+export function down(pgm: MigrationBuilder): void {
+  pgm.dropColumn('current_locations', 'output');
+}
diff --git a/package-lock.json b/package-lock.json
index f15e53e3..6504f9f0 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -14,8 +14,8 @@
         "@fastify/multipart": "^7.1.0",
         "@fastify/swagger": "^8.3.1",
         "@fastify/type-provider-typebox": "^3.2.0",
-        "@hirosystems/api-toolkit": "^1.7.0",
-        "@hirosystems/chainhook-client": "^1.8.0",
+        "@hirosystems/api-toolkit": "^1.7.1",
+        "@hirosystems/chainhook-client": "^1.12.0",
         "@semantic-release/changelog": "^6.0.3",
        "@semantic-release/commit-analyzer": "^10.0.4",
         "@semantic-release/git": "^10.0.1",
@@ -1250,9 +1250,9 @@
       }
     },
     "node_modules/@hirosystems/api-toolkit": {
-      "version": "1.7.0",
-      "resolved": "https://registry.npmjs.org/@hirosystems/api-toolkit/-/api-toolkit-1.7.0.tgz",
-      "integrity": "sha512-V2RfR3f1qvbF3kTzMZ23iLwjPCKXi49qNgmJikUXOloO1pBP7O1OpMS7VWtk6HOOQ9kkoNXasZsE1hQTlU+Swg==",
+      "version": "1.7.1",
+      "resolved": "https://registry.npmjs.org/@hirosystems/api-toolkit/-/api-toolkit-1.7.1.tgz",
+      "integrity": "sha512-Fx7euWKcQYUrBtAjsDnX6rMkDF+vdY1Yk57QuF3cJ4UcPbO2zwMKRxc4L2aSLOxRePi/DBFTyR2E8axA54Uzaw==",
       "dependencies": {
         "@fastify/cors": "^8.0.0",
         "@fastify/swagger": "^8.3.1",
@@ -1272,9 +1272,9 @@
       }
     },
     "node_modules/@hirosystems/chainhook-client": {
-      "version": "1.8.0",
-      "resolved": "https://registry.npmjs.org/@hirosystems/chainhook-client/-/chainhook-client-1.8.0.tgz",
-      "integrity": "sha512-BpYwrbxWuH0KGRyKq1T8nIiZUGaapOxz6yFZ653m6CJi7DS7kqOm2+v5X/DR0hbeZUmqriGMUJnROJ1tW08aEg==",
+      "version": "1.12.0",
+      "resolved": "https://registry.npmjs.org/@hirosystems/chainhook-client/-/chainhook-client-1.12.0.tgz",
+      "integrity": "sha512-FUlYMjnM2CGkxuBR0r+8+HPj+fhpJBJdcuS2e9YFz1NXfE7aDwM4bB5IxlcsJA2a5YAge1tZWeJUdR+TAnv/Rg==",
       "dependencies": {
         "@fastify/type-provider-typebox": "^3.2.0",
         "fastify": "^4.15.0",
@@ -19724,9 +19724,9 @@
       "requires": {}
     },
     "@hirosystems/api-toolkit": {
-      "version": "1.7.0",
-      "resolved": "https://registry.npmjs.org/@hirosystems/api-toolkit/-/api-toolkit-1.7.0.tgz",
-      "integrity": "sha512-V2RfR3f1qvbF3kTzMZ23iLwjPCKXi49qNgmJikUXOloO1pBP7O1OpMS7VWtk6HOOQ9kkoNXasZsE1hQTlU+Swg==",
+      "version": "1.7.1",
+      "resolved": "https://registry.npmjs.org/@hirosystems/api-toolkit/-/api-toolkit-1.7.1.tgz",
+      "integrity": "sha512-Fx7euWKcQYUrBtAjsDnX6rMkDF+vdY1Yk57QuF3cJ4UcPbO2zwMKRxc4L2aSLOxRePi/DBFTyR2E8axA54Uzaw==",
      "requires": {
         "@fastify/cors": "^8.0.0",
         "@fastify/swagger": "^8.3.1",
@@ -19740,9 +19740,9 @@
       }
     },
     "@hirosystems/chainhook-client": {
-      "version": "1.8.0",
-      "resolved": "https://registry.npmjs.org/@hirosystems/chainhook-client/-/chainhook-client-1.8.0.tgz",
-      "integrity": "sha512-BpYwrbxWuH0KGRyKq1T8nIiZUGaapOxz6yFZ653m6CJi7DS7kqOm2+v5X/DR0hbeZUmqriGMUJnROJ1tW08aEg==",
+      "version": "1.12.0",
+      "resolved": "https://registry.npmjs.org/@hirosystems/chainhook-client/-/chainhook-client-1.12.0.tgz",
+      "integrity": "sha512-FUlYMjnM2CGkxuBR0r+8+HPj+fhpJBJdcuS2e9YFz1NXfE7aDwM4bB5IxlcsJA2a5YAge1tZWeJUdR+TAnv/Rg==",
       "requires": {
         "@fastify/type-provider-typebox": "^3.2.0",
         "fastify": "^4.15.0",
"sha512-FUlYMjnM2CGkxuBR0r+8+HPj+fhpJBJdcuS2e9YFz1NXfE7aDwM4bB5IxlcsJA2a5YAge1tZWeJUdR+TAnv/Rg==", "requires": { "@fastify/type-provider-typebox": "^3.2.0", "fastify": "^4.15.0", diff --git a/package.json b/package.json index 49f73130..8615fea1 100644 --- a/package.json +++ b/package.json @@ -9,7 +9,7 @@ "build": "rimraf ./dist && tsc --project tsconfig.build.json", "start": "node dist/src/index.js", "start-ts": "ts-node ./src/index.ts", - "start:debug-server": "node dist/util/debug-server.js", + "start:debug-server": "ts-node ./util/debug-server.ts", "test": "jest --runInBand", "test:brc-20": "npm run test -- ./tests/brc-20/", "test:api": "npm run test -- ./tests/api/", @@ -56,8 +56,8 @@ "@fastify/multipart": "^7.1.0", "@fastify/swagger": "^8.3.1", "@fastify/type-provider-typebox": "^3.2.0", - "@hirosystems/api-toolkit": "^1.7.0", - "@hirosystems/chainhook-client": "^1.8.0", + "@hirosystems/api-toolkit": "^1.7.1", + "@hirosystems/chainhook-client": "^1.12.0", "@semantic-release/changelog": "^6.0.3", "@semantic-release/commit-analyzer": "^10.0.4", "@semantic-release/git": "^10.0.1", diff --git a/src/env.ts b/src/env.ts index 83ec68a1..27add807 100644 --- a/src/env.ts +++ b/src/env.ts @@ -35,9 +35,9 @@ const schema = Type.Object({ ORDHOOK_NODE_RPC_PORT: Type.Number({ default: 20456, minimum: 0, maximum: 65535 }), /** * Authorization token that the ordhook node must send with every event to make sure it's - * coming from the valid instance + * coming from the valid instance. Leave it undefined if you wish to avoid header validation. */ - ORDHOOK_NODE_AUTH_TOKEN: Type.String(), + ORDHOOK_NODE_AUTH_TOKEN: Type.Optional(Type.String()), /** * Register ordhook predicates automatically when the API is first launched. Set this to `false` * if you're configuring your predicates manually for any reason. @@ -55,6 +55,8 @@ const schema = Type.Object({ { default: 'default', replay: 'replay' }, { default: 'default' } ), + /** If the API should automatically shut down when Ordhook ingestion mode is `replay` */ + ORDHOOK_REPLAY_INGESTION_MODE_AUTO_SHUTDOWN: Type.Boolean({ default: true }), PGHOST: Type.String(), PGPORT: Type.Number({ default: 5432, minimum: 0, maximum: 65535 }), diff --git a/src/ordhook/server.ts b/src/ordhook/server.ts index 19d1decc..cc383229 100644 --- a/src/ordhook/server.ts +++ b/src/ordhook/server.ts @@ -45,10 +45,11 @@ export async function startOrdhookServer(args: { db: PgStore }): Promise { const streamed = payload.chainhook.is_streaming_blocks; - if (ENV.ORDHOOK_INGESTION_MODE === 'replay' && streamed) { + if ( + ENV.ORDHOOK_INGESTION_MODE === 'replay' && + ENV.ORDHOOK_REPLAY_INGESTION_MODE_AUTO_SHUTDOWN && + streamed + ) { logger.info(`OrdhookServer finished replaying blocks, shutting down`); return shutdown(); } @@ -67,5 +72,7 @@ export async function startOrdhookServer(args: { db: PgStore }): Promise 0) + this.revealedNumbers.push(reveal.inscription_number.jubilee); this.increaseMimeTypeCount(mime_type); this.increaseSatRarityCount(satoshi.rarity); this.increaseInscriptionTypeCount(reveal.inscription_number.classic < 0 ? 'cursed' : 'blessed'); @@ -89,8 +92,8 @@ export class BlockCache { tx_id, tx_index: reveal.tx_index, ordinal_number, - address: reveal.inscriber_address, - output: `${satpoint.tx_id}:${satpoint.vout}`, + address: reveal.inscriber_address ?? '', + output, offset: satpoint.offset ?? 
diff --git a/src/ordhook/server.ts b/src/ordhook/server.ts
index 19d1decc..cc383229 100644
--- a/src/ordhook/server.ts
+++ b/src/ordhook/server.ts
@@ -45,10 +45,11 @@ export async function startOrdhookServer(args: { db: PgStore }): Promise<ChainhookEventObserver> {
       const streamed = payload.chainhook.is_streaming_blocks;
-      if (ENV.ORDHOOK_INGESTION_MODE === 'replay' && streamed) {
+      if (
+        ENV.ORDHOOK_INGESTION_MODE === 'replay' &&
+        ENV.ORDHOOK_REPLAY_INGESTION_MODE_AUTO_SHUTDOWN &&
+        streamed
+      ) {
         logger.info(`OrdhookServer finished replaying blocks, shutting down`);
         return shutdown();
       }
@@ -67,5 +72,7 @@ export async function startOrdhookServer(args: { db: PgStore }): Promise<ChainhookEventObserver> {
diff --git a/src/pg/block-cache.ts b/src/pg/block-cache.ts
--- a/src/pg/block-cache.ts
+++ b/src/pg/block-cache.ts
@@ ... @@ export class BlockCache {
+    const output = `${satpoint.tx_id}:${satpoint.vout}`;
+    if (reveal.inscription_number.jubilee > 0)
+      this.revealedNumbers.push(reveal.inscription_number.jubilee);
     this.increaseMimeTypeCount(mime_type);
     this.increaseSatRarityCount(satoshi.rarity);
     this.increaseInscriptionTypeCount(reveal.inscription_number.classic < 0 ? 'cursed' : 'blessed');
@@ -89,8 +92,8 @@ export class BlockCache {
       tx_id,
       tx_index: reveal.tx_index,
       ordinal_number,
-      address: reveal.inscriber_address,
-      output: `${satpoint.tx_id}:${satpoint.vout}`,
+      address: reveal.inscriber_address ?? '',
+      output,
       offset: satpoint.offset ?? null,
       prev_output: null,
       prev_offset: null,
@@ -102,7 +105,8 @@ export class BlockCache {
       ordinal_number,
       block_height: this.blockHeight,
       tx_index: reveal.tx_index,
-      address: reveal.inscriber_address,
+      output,
+      address: reveal.inscriber_address ?? '',
     });
     if (recursive_refs.length > 0) this.recursiveRefs.set(reveal.inscription_id, recursive_refs);
   }
@@ -111,7 +115,8 @@ export class BlockCache {
     const satpoint = parseSatPoint(transfer.satpoint_post_transfer);
     const prevSatpoint = parseSatPoint(transfer.satpoint_pre_transfer);
     const ordinal_number = transfer.ordinal_number.toString();
-    const address = transfer.destination.value ?? null;
+    const address = transfer.destination.value ?? '';
+    const output = `${satpoint.tx_id}:${satpoint.vout}`;
     this.locations.push({
       block_hash: this.blockHash,
       block_height: this.blockHeight,
@@ -119,7 +124,7 @@ export class BlockCache {
       tx_index: transfer.tx_index,
       ordinal_number,
       address,
-      output: `${satpoint.tx_id}:${satpoint.vout}`,
+      output,
       offset: satpoint.offset ?? null,
       prev_output: `${prevSatpoint.tx_id}:${prevSatpoint.vout}`,
       prev_offset: prevSatpoint.offset ?? null,
@@ -135,6 +140,7 @@ export class BlockCache {
       ordinal_number,
       block_height: this.blockHeight,
      tx_index: transfer.tx_index,
+      output,
       address,
     });
   }
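Two conventions introduced in `block-cache.ts` flow through the rest of the change: a missing inscriber or destination address is persisted as an empty string (matching the new `notNull: true` address columns), and the `output` key is derived once from the satpoint and reused for both `locations` and `currentLocations`. A small sketch of that derivation, with a hypothetical satpoint value:

```typescript
// A satpoint has the form `${tx_id}:${vout}:${offset}`; the output key keeps
// only the first two components. Hypothetical value for illustration:
const satpoint = 'f1a2...9c:0:1500';
const [tx_id, vout] = satpoint.split(':');
const output = `${tx_id}:${vout}`; // 'f1a2...9c:0'
```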
diff --git a/src/pg/brc20/brc20-block-cache.ts b/src/pg/brc20/brc20-block-cache.ts
index 3bd35659..5f60d1a6 100644
--- a/src/pg/brc20/brc20-block-cache.ts
+++ b/src/pg/brc20/brc20-block-cache.ts
@@ -33,14 +33,15 @@ export class Brc20BlockCache {
   }
 
   deploy(operation: BitcoinBrc20DeployOperation, tx_id: string, tx_index: number) {
+    const zero = BigNumber('0');
     this.tokens.push({
       block_height: this.blockHeight,
       genesis_id: operation.deploy.inscription_id,
       tx_id,
       address: operation.deploy.address,
       ticker: operation.deploy.tick,
-      max: operation.deploy.max,
-      limit: operation.deploy.lim,
+      max: BigNumber(operation.deploy.max).toString(),
+      limit: BigNumber(operation.deploy.lim).toString(),
       decimals: operation.deploy.dec,
       self_mint: operation.deploy.self_mint,
     });
@@ -50,8 +51,8 @@ export class Brc20BlockCache {
       genesis_id: operation.deploy.inscription_id,
       ticker: operation.deploy.tick,
       address: operation.deploy.address,
-      avail_balance: '0',
-      trans_balance: '0',
+      avail_balance: zero.toString(),
+      trans_balance: zero.toString(),
       operation: DbBrc20Operation.deploy,
     });
     this.increaseOperationCount(DbBrc20Operation.deploy);
@@ -60,36 +61,38 @@ export class Brc20BlockCache {
   }
 
   mint(operation: BitcoinBrc20MintOperation, tx_index: number) {
+    const zero = BigNumber('0');
+    const amt = BigNumber(operation.mint.amt).abs();
     this.operations.push({
       block_height: this.blockHeight,
       tx_index,
       genesis_id: operation.mint.inscription_id,
       ticker: operation.mint.tick,
       address: operation.mint.address,
-      avail_balance: operation.mint.amt,
-      trans_balance: '0',
+      avail_balance: amt.toString(),
+      trans_balance: zero.toString(),
       operation: DbBrc20Operation.mint,
     });
-    const amt = BigNumber(operation.mint.amt);
     this.increaseTokenMintedSupply(operation.mint.tick, amt);
     this.increaseTokenTxCount(operation.mint.tick);
     this.increaseOperationCount(DbBrc20Operation.mint);
     this.increaseAddressOperationCount(operation.mint.address, DbBrc20Operation.mint);
-    this.updateAddressBalance(operation.mint.tick, operation.mint.address, amt, BigNumber(0), amt);
+    this.updateAddressBalance(operation.mint.tick, operation.mint.address, amt, zero, amt);
   }
 
   transfer(operation: BitcoinBrc20TransferOperation, tx_index: number) {
+    const zero = BigNumber('0');
+    const amt = BigNumber(operation.transfer.amt).abs();
     this.operations.push({
       block_height: this.blockHeight,
       tx_index,
       genesis_id: operation.transfer.inscription_id,
       ticker: operation.transfer.tick,
       address: operation.transfer.address,
-      avail_balance: BigNumber(operation.transfer.amt).negated().toString(),
-      trans_balance: operation.transfer.amt,
+      avail_balance: amt.negated().toString(),
+      trans_balance: amt.toString(),
       operation: DbBrc20Operation.transfer,
     });
-    const amt = BigNumber(operation.transfer.amt);
     this.increaseOperationCount(DbBrc20Operation.transfer);
     this.increaseTokenTxCount(operation.transfer.tick);
     this.increaseAddressOperationCount(operation.transfer.address, DbBrc20Operation.transfer);
@@ -98,19 +101,21 @@ export class Brc20BlockCache {
       operation.transfer.address,
       amt.negated(),
       amt,
-      BigNumber(0)
+      zero
     );
   }
 
   transferSend(operation: BitcoinBrc20TransferSendOperation, tx_index: number) {
+    const amt = BigNumber(operation.transfer_send.amt).abs();
+    const zero = BigNumber('0');
     this.operations.push({
       block_height: this.blockHeight,
       tx_index,
       genesis_id: operation.transfer_send.inscription_id,
       ticker: operation.transfer_send.tick,
       address: operation.transfer_send.sender_address,
-      avail_balance: '0',
-      trans_balance: BigNumber(operation.transfer_send.amt).negated().toString(),
+      avail_balance: zero.toString(),
+      trans_balance: amt.negated().toString(),
       operation: DbBrc20Operation.transferSend,
     });
     this.transferReceivers.set(
@@ -123,11 +128,10 @@ export class Brc20BlockCache {
       genesis_id: operation.transfer_send.inscription_id,
       ticker: operation.transfer_send.tick,
       address: operation.transfer_send.receiver_address,
-      avail_balance: operation.transfer_send.amt,
+      avail_balance: amt.toString(),
       trans_balance: '0',
       operation: DbBrc20Operation.transferReceive,
     });
-    const amt = BigNumber(operation.transfer_send.amt);
     this.increaseOperationCount(DbBrc20Operation.transferSend);
     this.increaseTokenTxCount(operation.transfer_send.tick);
     this.increaseAddressOperationCount(
@@ -143,7 +147,7 @@ export class Brc20BlockCache {
     this.updateAddressBalance(
       operation.transfer_send.tick,
       operation.transfer_send.sender_address,
-      BigNumber('0'),
+      zero,
       amt.negated(),
       amt.negated()
     );
@@ -151,7 +155,7 @@ export class Brc20BlockCache {
       operation.transfer_send.tick,
       operation.transfer_send.receiver_address,
       amt,
-      BigNumber(0),
+      zero,
       amt
     );
   }
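The `BigNumber(...).abs()` / `.toString()` calls normalize every amount before it is persisted: `bignumber.js` parses whatever textual form the payload carries, `.abs()` strips a stray sign, and `.toString()` emits a canonical decimal string, so equal amounts always serialize identically. A quick sketch of the effect:

```typescript
import BigNumber from 'bignumber.js';

// '1e3' and '+1000.0' both parse, but would be stored as different strings;
// BigNumber canonicalizes them to the same decimal representation.
console.log(BigNumber('1e3').abs().toString());     // '1000'
console.log(BigNumber('+1000.0').abs().toString()); // '1000'
```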
diff --git a/src/pg/brc20/brc20-pg-store.ts b/src/pg/brc20/brc20-pg-store.ts
index 5f727507..137f808a 100644
--- a/src/pg/brc20/brc20-pg-store.ts
+++ b/src/pg/brc20/brc20-pg-store.ts
@@ -373,49 +373,51 @@ export class Brc20PgStore extends BasePgStoreModule {
             WHERE ticker IN ${sql(filters.ticker)}
           `
           : sql`SELECT NULL AS count`
-      })
-      SELECT
-        e.operation,
-        e.avail_balance,
-        e.trans_balance,
-        e.address,
-        e.to_address,
-        d.ticker,
-        e.genesis_id AS inscription_id,
-        i.block_height,
-        l.block_hash,
-        l.tx_id,
-        l.timestamp,
-        l.output,
-        l.offset,
-        d.max AS deploy_max,
-        d.limit AS deploy_limit,
-        d.decimals AS deploy_decimals,
-        ${
-          needsGlobalEventCount || needsAddressEventCount || needsTickerCount
-            ? sql`(SELECT count FROM event_count)`
-            : sql`COUNT(*) OVER()`
-        } AS total
-      FROM brc20_operations AS e
-      INNER JOIN brc20_tokens AS d ON d.ticker = e.ticker
-      INNER JOIN inscriptions AS i ON i.genesis_id = e.genesis_id
-      INNER JOIN locations AS l ON i.ordinal_number = l.ordinal_number AND e.block_height = l.block_height AND e.tx_index = l.tx_index
-      WHERE TRUE
-        ${
-          operationsFilter
-            ? sql`AND e.operation IN ${sql(operationsFilter)}`
-            : sql`AND e.operation <> 'transfer_receive'`
-        }
-        ${filters.ticker ? sql`AND e.ticker IN ${sql(filters.ticker)}` : sql``}
-        ${filters.block_height ? sql`AND e.block_height = ${filters.block_height}` : sql``}
-        ${
-          filters.address
-            ? sql`AND (e.address = ${filters.address} OR e.to_address = ${filters.address})`
-            : sql``
-        }
-      ORDER BY e.block_height DESC, e.tx_index DESC
-      LIMIT ${page.limit}
-      OFFSET ${page.offset}
+      }),
+      operations AS (
+        SELECT
+          e.operation,
+          e.avail_balance,
+          e.trans_balance,
+          e.address,
+          e.to_address,
+          e.ticker,
+          e.genesis_id AS inscription_id,
+          d.max AS deploy_max,
+          d.limit AS deploy_limit,
+          d.decimals AS deploy_decimals,
+          i.ordinal_number,
+          e.block_height,
+          e.tx_index,
+          ${
+            needsGlobalEventCount || needsAddressEventCount || needsTickerCount
+              ? sql`(SELECT count FROM event_count)`
+              : sql`COUNT(*) OVER()`
+          } AS total
+        FROM brc20_operations AS e
+        INNER JOIN brc20_tokens AS d ON d.ticker = e.ticker
+        INNER JOIN inscriptions AS i ON i.genesis_id = e.genesis_id
+        WHERE TRUE
+          ${
+            operationsFilter
+              ? sql`AND e.operation IN ${sql(operationsFilter)}`
+              : sql`AND e.operation <> 'transfer_receive'`
+          }
+          ${filters.ticker ? sql`AND e.ticker IN ${sql(filters.ticker)}` : sql``}
+          ${filters.block_height ? sql`AND e.block_height = ${filters.block_height}` : sql``}
+          ${
+            filters.address
+              ? sql`AND (e.address = ${filters.address} OR e.to_address = ${filters.address})`
+              : sql``
+          }
+        ORDER BY e.block_height DESC, e.tx_index DESC
+        LIMIT ${page.limit}
+        OFFSET ${page.offset}
+      )
+      SELECT o.*, l.block_hash, l.tx_id, l.timestamp, l.output, l.offset
+      FROM operations AS o
+      INNER JOIN locations AS l USING (ordinal_number, block_height, tx_index)
+      ORDER BY o.block_height DESC, o.tx_index DESC
     `;
     return {
       total: results[0]?.total ?? 0,
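The rewritten activity query follows a paginate-then-join shape: the `operations` CTE filters, sorts, and applies `LIMIT`/`OFFSET` on the comparatively narrow `brc20_operations` rows, and only the surviving page is joined against the large `locations` table. A stripped-down sketch of the pattern, with hypothetical table names:

```typescript
// Sketch: join the expensive table only after pagination, so at most
// `limit` rows ever touch wide_table.
const rows = await sql`
  WITH page AS (
    SELECT id, created_at FROM narrow_table
    ORDER BY created_at DESC
    LIMIT ${limit} OFFSET ${offset}
  )
  SELECT p.id, w.payload
  FROM page AS p
  INNER JOIN wide_table AS w USING (id)
  ORDER BY p.created_at DESC
`;
```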
diff --git a/src/pg/pg-store.ts b/src/pg/pg-store.ts
index 7a739ec7..0a4dea70 100644
--- a/src/pg/pg-store.ts
+++ b/src/pg/pg-store.ts
@@ -35,7 +35,7 @@ import { BlockCache } from './block-cache';
 
 export const MIGRATIONS_DIR = path.join(__dirname, '../../migrations');
 const ORDINALS_GENESIS_BLOCK = 767430;
-export const INSERT_BATCH_SIZE = 4000;
+export const INSERT_BATCH_SIZE = 3500;
 
 type InscriptionIdentifier = { genesis_id: string } | { number: number };
 
@@ -82,7 +82,8 @@ export class PgStore extends BasePgStore {
    */
   async updateInscriptions(payload: BitcoinPayload): Promise<void> {
     await this.sqlWriteTransaction(async sql => {
-      const streamed = payload.chainhook.is_streaming_blocks;
+      const streamed =
+        ENV.ORDHOOK_INGESTION_MODE === 'default' && payload.chainhook.is_streaming_blocks;
       for (const event of payload.rollback) {
         logger.info(`PgStore rollback block ${event.block_identifier.index}`);
         const time = stopwatch();
@@ -184,36 +185,34 @@
         ...l,
         timestamp: sql`TO_TIMESTAMP(${l.timestamp})`,
       }));
+      // Insert locations, figure out moved inscriptions, insert inscription transfers.
       for await (const batch of batchIterate(entries, INSERT_BATCH_SIZE))
         await sql`
-          INSERT INTO locations ${sql(batch)}
-          ON CONFLICT (ordinal_number, block_height, tx_index) DO NOTHING
-        `;
-      // Insert block transfers.
-      let block_transfer_index = 0;
-      const transferEntries = [];
-      for (const transfer of cache.locations) {
-        const transferred = await sql<{ genesis_id: string; number: string }[]>`
-          SELECT genesis_id, number FROM inscriptions
-          WHERE ordinal_number = ${transfer.ordinal_number} AND (
-            block_height < ${transfer.block_height}
-            OR (block_height = ${transfer.block_height} AND tx_index < ${transfer.tx_index})
+          WITH location_inserts AS (
+            INSERT INTO locations ${sql(batch)}
+            ON CONFLICT (ordinal_number, block_height, tx_index) DO NOTHING
+            RETURNING ordinal_number, block_height, block_hash, tx_index
+          ),
+          prev_transfer_index AS (
+            SELECT MAX(block_transfer_index) AS max
+            FROM inscription_transfers
+            WHERE block_height = (SELECT block_height FROM location_inserts LIMIT 1)
+          ),
+          moved_inscriptions AS (
+            SELECT
+              i.genesis_id, i.number, i.ordinal_number, li.block_height, li.block_hash, li.tx_index,
+              (
+                ROW_NUMBER() OVER (ORDER BY li.block_height ASC, li.tx_index ASC) + (SELECT COALESCE(max, -1) FROM prev_transfer_index)
+              ) AS block_transfer_index
+            FROM inscriptions AS i
+            INNER JOIN location_inserts AS li ON li.ordinal_number = i.ordinal_number
+            WHERE
+              i.block_height < li.block_height
+              OR (i.block_height = li.block_height AND i.tx_index < li.tx_index)
           )
-        `;
-        for (const inscription of transferred)
-          transferEntries.push({
-            genesis_id: inscription.genesis_id,
-            number: inscription.number,
-            ordinal_number: transfer.ordinal_number,
-            block_height: transfer.block_height,
-            block_hash: transfer.block_hash,
-            tx_index: transfer.tx_index,
-            block_transfer_index: block_transfer_index++,
-          });
-      }
-      for await (const batch of batchIterate(transferEntries, INSERT_BATCH_SIZE))
-        await sql`
-          INSERT INTO inscription_transfers ${sql(batch)}
+          INSERT INTO inscription_transfers
+            (genesis_id, number, ordinal_number, block_height, block_hash, tx_index, block_transfer_index)
+            (SELECT * FROM moved_inscriptions)
           ON CONFLICT (block_height, block_transfer_index) DO NOTHING
         `;
     }
@@ -228,18 +227,20 @@
       if (cache.currentLocations.size) {
         // Deduct counts from previous owners
         const moved_sats = [...cache.currentLocations.keys()];
-        const prevOwners = await sql<{ address: string; count: number }[]>`
-          SELECT address, COUNT(*) AS count
-          FROM current_locations
-          WHERE ordinal_number IN ${sql(moved_sats)}
-          GROUP BY address
-        `;
-        for (const owner of prevOwners)
-          await sql`
-            UPDATE counts_by_address
-            SET count = count - ${owner.count}
-            WHERE address = ${owner.address}
+        for await (const batch of batchIterate(moved_sats, INSERT_BATCH_SIZE)) {
+          const prevOwners = await sql<{ address: string; count: number }[]>`
+            SELECT address, COUNT(*) AS count
+            FROM current_locations
+            WHERE ordinal_number IN ${sql(batch)}
+            GROUP BY address
           `;
+          for (const owner of prevOwners)
+            await sql`
+              UPDATE counts_by_address
+              SET count = count - ${owner.count}
+              WHERE address = ${owner.address}
+            `;
+        }
         // Insert locations
         const entries = [...cache.currentLocations.values()];
         for await (const batch of batchIterate(entries, INSERT_BATCH_SIZE))
           await sql`
@@ -255,24 +256,25 @@
             EXCLUDED.tx_index > current_locations.tx_index)
         `;
         // Update owner counts
-        await sql`
-          WITH new_owners AS (
-            SELECT address, COUNT(*) AS count
-            FROM current_locations
-            WHERE ordinal_number IN ${sql(moved_sats)}
-            GROUP BY address
-          )
-          INSERT INTO counts_by_address (address, count)
-          (SELECT address, count FROM new_owners)
-          ON CONFLICT (address) DO UPDATE SET count = counts_by_address.count + EXCLUDED.count
-        `;
-        if (streamed)
-          for await (const batch of batchIterate(moved_sats, INSERT_BATCH_SIZE))
+        for await (const batch of batchIterate(moved_sats, INSERT_BATCH_SIZE)) {
+          await sql`
+            WITH new_owners AS (
+              SELECT address, COUNT(*) AS count
+              FROM current_locations
+              WHERE ordinal_number IN ${sql(batch)}
+              GROUP BY address
+            )
+            INSERT INTO counts_by_address (address, count)
+            (SELECT address, count FROM new_owners)
+            ON CONFLICT (address) DO UPDATE SET count = counts_by_address.count + EXCLUDED.count
+          `;
+          if (streamed)
            await sql`
              UPDATE inscriptions SET updated_at = NOW() WHERE ordinal_number IN ${sql(batch)}
            `;
+        }
       }
       await this.counts.applyCounts(sql, cache);
     }
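The owner-count adjustments above now iterate over `moved_sats` in `INSERT_BATCH_SIZE` chunks instead of interpolating the whole array at once, which keeps each statement's `IN` list (and therefore its bind-parameter count) bounded; Postgres rejects statements with more than 65535 bind parameters, which is likely also why `INSERT_BATCH_SIZE` drops from 4000 to 3500 now that inserted rows carry extra columns. The chunking idiom, as a sketch:

```typescript
// `batchIterate` (a helper already used throughout this file) yields
// fixed-size slices of a large array, bounding each statement's size.
for await (const batch of batchIterate(moved_sats, INSERT_BATCH_SIZE)) {
  await sql`UPDATE inscriptions SET updated_at = NOW() WHERE ordinal_number IN ${sql(batch)}`;
}
```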
@@ -328,9 +330,9 @@
       if (cache.currentLocations.size) {
         for (const ordinal_number of moved_sats) {
           await sql`
-            INSERT INTO current_locations (ordinal_number, block_height, tx_index, address)
+            INSERT INTO current_locations (ordinal_number, block_height, tx_index, output, address)
             (
-              SELECT ordinal_number, block_height, tx_index, address
+              SELECT ordinal_number, block_height, tx_index, output, address
               FROM locations
               WHERE ordinal_number = ${ordinal_number}
               ORDER BY block_height DESC, tx_index DESC
@@ -503,136 +505,141 @@
         orderBy = sql`ARRAY_POSITION(ARRAY['common','uncommon','rare','epic','legendary','mythic'], s.rarity) ${order}, i.number DESC`;
         break;
     }
-      // This function will generate a query to be used for getting results or total counts.
-      const query = (
-        columns: postgres.PendingQuery<postgres.Row[]>,
-        sorting: postgres.PendingQuery<postgres.Row[]>
-      ) => sql`
-        SELECT ${columns}
-        FROM inscriptions AS i
-        INNER JOIN current_locations AS cur ON cur.ordinal_number = i.ordinal_number
-        INNER JOIN locations AS cur_l ON cur_l.ordinal_number = cur.ordinal_number AND cur_l.block_height = cur.block_height AND cur_l.tx_index = cur.tx_index
-        INNER JOIN locations AS gen_l ON gen_l.ordinal_number = i.ordinal_number AND gen_l.block_height = i.block_height AND gen_l.tx_index = i.tx_index
-        INNER JOIN satoshis AS s ON s.ordinal_number = i.ordinal_number
-        WHERE TRUE
-          ${
-            filters?.genesis_id?.length
-              ? sql`AND i.genesis_id IN ${sql(filters.genesis_id)}`
-              : sql``
-          }
-          ${
-            filters?.genesis_block_height
-              ? sql`AND i.block_height = ${filters.genesis_block_height}`
-              : sql``
-          }
-          ${
-            filters?.genesis_block_hash
-              ? sql`AND gen_l.block_hash = ${filters.genesis_block_hash}`
-              : sql``
-          }
-          ${
-            filters?.from_genesis_block_height
-              ? sql`AND i.block_height >= ${filters.from_genesis_block_height}`
-              : sql``
-          }
-          ${
-            filters?.to_genesis_block_height
-              ? sql`AND i.block_height <= ${filters.to_genesis_block_height}`
-              : sql``
-          }
-          ${
-            filters?.from_sat_coinbase_height
-              ? sql`AND s.coinbase_height >= ${filters.from_sat_coinbase_height}`
-              : sql``
-          }
-          ${
-            filters?.to_sat_coinbase_height
-              ? sql`AND s.coinbase_height <= ${filters.to_sat_coinbase_height}`
-              : sql``
-          }
-          ${
-            filters?.from_genesis_timestamp
-              ? sql`AND i.timestamp >= to_timestamp(${filters.from_genesis_timestamp})`
-              : sql``
-          }
-          ${
-            filters?.to_genesis_timestamp
-              ? sql`AND i.timestamp <= to_timestamp(${filters.to_genesis_timestamp})`
-              : sql``
-          }
-          ${
-            filters?.from_sat_ordinal
-              ? sql`AND i.ordinal_number >= ${filters.from_sat_ordinal}`
-              : sql``
-          }
-          ${
-            filters?.to_sat_ordinal ? sql`AND i.ordinal_number <= ${filters.to_sat_ordinal}` : sql``
-          }
-          ${filters?.number?.length ? sql`AND i.number IN ${sql(filters.number)}` : sql``}
-          ${
-            filters?.from_number !== undefined ? sql`AND i.number >= ${filters.from_number}` : sql``
-          }
-          ${filters?.to_number !== undefined ? sql`AND i.number <= ${filters.to_number}` : sql``}
-          ${filters?.address?.length ? sql`AND cur.address IN ${sql(filters.address)}` : sql``}
-          ${filters?.mime_type?.length ? sql`AND i.mime_type IN ${sql(filters.mime_type)}` : sql``}
-          ${filters?.output ? sql`AND cur_l.output = ${filters.output}` : sql``}
-          ${filters?.sat_rarity?.length ? sql`AND s.rarity IN ${sql(filters.sat_rarity)}` : sql``}
-          ${filters?.sat_ordinal ? sql`AND i.ordinal_number = ${filters.sat_ordinal}` : sql``}
-          ${filters?.recursive !== undefined ? sql`AND i.recursive = ${filters.recursive}` : sql``}
-          ${filters?.cursed === true ? sql`AND i.number < 0` : sql``}
-          ${filters?.cursed === false ? sql`AND i.number >= 0` : sql``}
-          ${
-            filters?.genesis_address?.length
-              ? sql`AND i.address IN ${sql(filters.genesis_address)}`
-              : sql``
-          }
-          ${sorting}
-      `;
-      const results = await sql`${query(
-        sql`
-          i.genesis_id,
-          i.number,
-          i.mime_type,
-          i.content_type,
-          i.content_length,
-          i.fee AS genesis_fee,
-          i.curse_type,
-          i.ordinal_number AS sat_ordinal,
-          i.parent,
-          i.metadata,
-          s.rarity AS sat_rarity,
-          s.coinbase_height AS sat_coinbase_height,
-          i.recursive,
-          (
-            SELECT STRING_AGG(ir.ref_genesis_id, ',')
-            FROM inscription_recursions AS ir
-            WHERE ir.genesis_id = i.genesis_id
-          ) AS recursion_refs,
-          i.block_height AS genesis_block_height,
+      // Do we need a filtered `COUNT(*)`? If so, try to use the pre-calculated counts we have in
+      // cached tables to speed up these queries.
+      const countType = getIndexResultCountType(filters);
+      const total = await this.counts.fromResults(countType, filters);
+      const results = await sql<(DbFullyLocatedInscriptionResult & { total: number })[]>`
+        WITH results AS (
+          SELECT
+            i.genesis_id,
+            i.number,
+            i.mime_type,
+            i.content_type,
+            i.content_length,
+            i.fee AS genesis_fee,
+            i.curse_type,
+            i.ordinal_number AS sat_ordinal,
+            i.parent,
+            i.metadata,
+            s.rarity AS sat_rarity,
+            s.coinbase_height AS sat_coinbase_height,
+            i.recursive,
+            (
+              SELECT STRING_AGG(ir.ref_genesis_id, ',')
+              FROM inscription_recursions AS ir
+              WHERE ir.genesis_id = i.genesis_id
+            ) AS recursion_refs,
+            i.block_height AS genesis_block_height,
+            i.tx_index AS genesis_tx_index,
+            i.timestamp AS genesis_timestamp,
+            i.address AS genesis_address,
+            cur.address,
+            cur.tx_index,
+            cur.block_height,
+            ${total === undefined ? sql`COUNT(*) OVER() AS total` : sql`0 AS total`},
+            ROW_NUMBER() OVER(ORDER BY ${orderBy}) AS row_num
+          FROM inscriptions AS i
+          INNER JOIN current_locations AS cur ON cur.ordinal_number = i.ordinal_number
+          INNER JOIN satoshis AS s ON s.ordinal_number = i.ordinal_number
+          WHERE TRUE
+            ${
+              filters?.genesis_id?.length
+                ? sql`AND i.genesis_id IN ${sql(filters.genesis_id)}`
+                : sql``
+            }
+            ${
+              filters?.genesis_block_height
+                ? sql`AND i.block_height = ${filters.genesis_block_height}`
+                : sql``
+            }
+            ${
+              filters?.genesis_block_hash
+                ? sql`AND i.block_hash = ${filters.genesis_block_hash}`
+                : sql``
+            }
+            ${
+              filters?.from_genesis_block_height
+                ? sql`AND i.block_height >= ${filters.from_genesis_block_height}`
+                : sql``
+            }
+            ${
+              filters?.to_genesis_block_height
+                ? sql`AND i.block_height <= ${filters.to_genesis_block_height}`
+                : sql``
+            }
+            ${
+              filters?.from_sat_coinbase_height
+                ? sql`AND s.coinbase_height >= ${filters.from_sat_coinbase_height}`
+                : sql``
+            }
+            ${
+              filters?.to_sat_coinbase_height
+                ? sql`AND s.coinbase_height <= ${filters.to_sat_coinbase_height}`
+                : sql``
+            }
+            ${
+              filters?.from_genesis_timestamp
+                ? sql`AND i.timestamp >= to_timestamp(${filters.from_genesis_timestamp})`
+                : sql``
+            }
+            ${
+              filters?.to_genesis_timestamp
+                ? sql`AND i.timestamp <= to_timestamp(${filters.to_genesis_timestamp})`
+                : sql``
+            }
+            ${
+              filters?.from_sat_ordinal
+                ? sql`AND i.ordinal_number >= ${filters.from_sat_ordinal}`
+                : sql``
+            }
+            ${
+              filters?.to_sat_ordinal
+                ? sql`AND i.ordinal_number <= ${filters.to_sat_ordinal}`
+                : sql``
+            }
+            ${filters?.number?.length ? sql`AND i.number IN ${sql(filters.number)}` : sql``}
+            ${
+              filters?.from_number !== undefined
+                ? sql`AND i.number >= ${filters.from_number}`
+                : sql``
+            }
+            ${filters?.to_number !== undefined ? sql`AND i.number <= ${filters.to_number}` : sql``}
+            ${filters?.address?.length ? sql`AND cur.address IN ${sql(filters.address)}` : sql``}
+            ${
+              filters?.mime_type?.length ? sql`AND i.mime_type IN ${sql(filters.mime_type)}` : sql``
+            }
+            ${filters?.output ? sql`AND cur.output = ${filters.output}` : sql``}
+            ${filters?.sat_rarity?.length ? sql`AND s.rarity IN ${sql(filters.sat_rarity)}` : sql``}
+            ${filters?.sat_ordinal ? sql`AND i.ordinal_number = ${filters.sat_ordinal}` : sql``}
+            ${
+              filters?.recursive !== undefined ? sql`AND i.recursive = ${filters.recursive}` : sql``
+            }
+            ${filters?.cursed === true ? sql`AND i.number < 0` : sql``}
+            ${filters?.cursed === false ? sql`AND i.number >= 0` : sql``}
+            ${
+              filters?.genesis_address?.length
+                ? sql`AND i.address IN ${sql(filters.genesis_address)}`
+                : sql``
+            }
+          ORDER BY ${orderBy} LIMIT ${page.limit} OFFSET ${page.offset}
+        )
+        SELECT
+          r.*,
           gen_l.block_hash AS genesis_block_hash,
           gen_l.tx_id AS genesis_tx_id,
-          i.timestamp AS genesis_timestamp,
-          i.address AS genesis_address,
           cur_l.tx_id,
-          cur.address,
           cur_l.output,
           cur_l.offset,
           cur_l.timestamp,
           cur_l.value
-        `,
-        sql`ORDER BY ${orderBy} LIMIT ${page.limit} OFFSET ${page.offset}`
-      )}`;
-      // Do we need a filtered `COUNT(*)`? If so, try to use the pre-calculated counts we have in
-      // cached tables to speed up these queries.
-      const countType = getIndexResultCountType(filters);
-      let total = await this.counts.fromResults(countType, filters);
-      if (total === undefined) {
-        // If the count is more complex, attempt it with a separate query.
-        const count = await sql<{ total: number }[]>`${query(sql`COUNT(*) AS total`, sql``)}`;
-        total = count[0].total;
-      }
+        FROM results AS r
+        INNER JOIN locations AS cur_l ON cur_l.ordinal_number = r.sat_ordinal AND cur_l.block_height = r.block_height AND cur_l.tx_index = r.tx_index
+        INNER JOIN locations AS gen_l ON gen_l.ordinal_number = r.sat_ordinal AND gen_l.block_height = r.genesis_block_height AND gen_l.tx_index = r.genesis_tx_index
+        ORDER BY r.row_num ASC
+      `;
       return {
-        total,
+        total: total ?? results[0]?.total ?? 0,
         results: results ?? [],
       };
     });
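Two details make the rewritten `getInscriptions` both cheap and page-stable. The expensive `COUNT(*) OVER()` window is computed only when no pre-calculated count applies (`total === undefined`); otherwise a constant `0` is selected and the cached value wins in `total: total ?? results[0]?.total ?? 0`. And `ROW_NUMBER() OVER(ORDER BY ${orderBy}) AS row_num` freezes the page order inside the CTE, so after the outer joins against `locations` the final `ORDER BY r.row_num ASC` restores it without repeating the full sort expression.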
diff --git a/src/pg/types.ts b/src/pg/types.ts
index e72e7db9..57696048 100644
--- a/src/pg/types.ts
+++ b/src/pg/types.ts
@@ -14,8 +14,9 @@ export type DbInscriptionInsert = {
   number: number;
   classic_number: number;
   block_height: number;
+  block_hash: string;
   tx_index: number;
-  address: string | null;
+  address: string;
   mime_type: string;
   content_type: string;
   content_length: number;
@@ -38,7 +39,7 @@ export type DbLocationInsert = {
   block_hash: string;
   tx_index: number;
   tx_id: string;
-  address: string | null;
+  address: string;
   output: string;
   offset: PgNumeric | null;
   prev_output: string | null;
@@ -52,22 +53,8 @@ export type DbCurrentLocationInsert = {
   ordinal_number: PgNumeric;
   block_height: number;
   tx_index: number;
-  address: string | null;
-};
-
-type AbstractLocationData = {
-  block_height: number;
-  block_hash: string;
-  tx_id: string;
-  tx_index: number;
-  address: string | null;
   output: string;
-  offset: PgNumeric | null;
-  prev_output: string | null;
-  prev_offset: PgNumeric | null;
-  value: PgNumeric | null;
-  transfer_type: DbLocationTransferType;
-  block_transfer_index: number | null;
+  address: string;
 };
 
 /**
@@ -124,7 +111,7 @@ export type DbLocation = {
   block_hash: string;
   tx_id: string;
   tx_index: number;
-  address: string | null;
+  address: string;
   output: string;
   offset: string | null;
   prev_output: string | null;
@@ -139,7 +126,7 @@ export type DbInscriptionLocationChange = {
   from_block_height: string;
   from_block_hash: string;
   from_tx_id: string;
-  from_address: string | null;
+  from_address: string;
   from_output: string;
   from_offset: string | null;
   from_value: string | null;
@@ -147,7 +134,7 @@ export type DbInscriptionLocationChange = {
   to_block_height: string;
   to_block_hash: string;
   to_tx_id: string;
-  to_address: string | null;
+  to_address: string;
   to_output: string;
   to_offset: string | null;
   to_value: string | null;
diff --git a/tests/api/inscriptions.test.ts b/tests/api/inscriptions.test.ts
index 648a6406..74a7a06d 100644
--- a/tests/api/inscriptions.test.ts
+++ b/tests/api/inscriptions.test.ts
@@ -3188,7 +3188,7 @@ describe('/inscriptions', () => {
         new TestChainhookPayloadBuilder()
           .rollback()
           .block({
-            height: 775618,
+            height: 778575,
             hash: '000000000000000000032ef6c45a69c0496456b3cae84ee9f2899f636d03c5ac',
             timestamp: 1675312161,
           })
diff --git a/tests/ordhook/server.test.ts b/tests/ordhook/server.test.ts
index 4f8a4d23..17cf8a70 100644
--- a/tests/ordhook/server.test.ts
+++ b/tests/ordhook/server.test.ts
@@ -126,7 +126,7 @@ describe('EventServer', () => {
     const payload2 = new TestChainhookPayloadBuilder()
       .rollback()
      .block({
-        height: 107,
+        height: 767430,
         hash: '0x163de66dc9c0949905bfe8e148bde04600223cf88d19f26fdbeba1d6e6fa0f88',
         timestamp: 1676913207,
       })
diff --git a/util/debug-server.ts b/util/debug-server.ts
index 3537408a..2ea49d17 100644
--- a/util/debug-server.ts
+++ b/util/debug-server.ts
@@ -23,17 +23,18 @@ import * as path from 'path';
 const serverOpts: ServerOptions = {
   hostname: ENV.API_HOST,
   port: ENV.EVENT_PORT,
-  auth_token: ENV.ORDHOOK_NODE_AUTH_TOKEN,
+  auth_token: ENV.ORDHOOK_NODE_AUTH_TOKEN ?? '',
   external_base_url: `http://${ENV.EXTERNAL_HOSTNAME}`,
   wait_for_chainhook_node: false,
   validate_chainhook_payloads: false,
+  validate_token_authorization: false,
   body_limit: ENV.EVENT_SERVER_BODY_LIMIT,
   node_type: 'ordhook',
 };
 const ordhookOpts: ChainhookNodeOptions = {
   base_url: ORDHOOK_BASE_PATH,
 };
-const dirPath = path.join(__dirname, '../../tmp/debug-server/');
+const dirPath = path.join(__dirname, '../tmp/debug-server/');
 fs.mkdirSync(dirPath, { recursive: true });
 logger.info(`DebugServer saving outputs to ${dirPath}`);
 
@@ -41,7 +42,7 @@ const server = new ChainhookEventObserver(serverOpts, ordhookOpts);
 server
   .start([], async (uuid: string, payload: Payload) => {
     logger.info(`DebugServer received payload from predicate ${uuid}`);
-    const filePath = path.join(dirPath, `${new Date().getTime()}.txt`);
+    const filePath = path.join(dirPath, `${payload.apply[0].block_identifier.index}.txt`);
     fs.writeFileSync(filePath, JSON.stringify(payload, null, 2));
     return Promise.resolve();
   })
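With these changes the debug server runs straight from source (`npm run start:debug-server`, or the new "Run: debug server" launch configuration), no longer requires an auth token, and names each dump after the first applied block's height rather than a timestamp, so a re-received block overwrites its previous dump instead of piling up new files. One caveat worth noting: `payload.apply[0]` assumes every payload carries at least one apply event, so a rollback-only payload would throw here.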