diff --git a/backend/package.json b/backend/package.json
index 915067f1..38ba5bec 100644
--- a/backend/package.json
+++ b/backend/package.json
@@ -112,8 +112,9 @@
     "deploy-worker-prod": "./tools/deploy-worker.sh crossfeed-prod-worker",
     "syncdb": "docker-compose exec -T backend npx ts-node src/tools/run-syncdb.ts",
     "pesyncdb": "docker-compose exec -T backend npx ts-node src/tools/run-pesyncdb.ts",
-    "control-queue": "docker-compose exec -T backend npx ts-node src/tools/consumeControlQueue.ts"
+    "scan-exec": "docker-compose exec -T backend npx ts-node src/tools/run-scanExecution.ts",
+    "send-message": "node sendMessage.js"
   },
   "author": "",
   "license": "ISC"
-}
+}
\ No newline at end of file
diff --git a/backend/sendMessage.js b/backend/sendMessage.js
index 6e875286..e868adb8 100644
--- a/backend/sendMessage.js
+++ b/backend/sendMessage.js
@@ -1,19 +1,18 @@
 // sendMessage.js
 const amqp = require('amqplib');
 
-async function sendMessageToControlQueue(message) {
+async function sendMessageToQueue(message, queue) {
   const connection = await amqp.connect('amqp://localhost');
   const channel = await connection.createChannel();
 
-  const controlQueue = 'ControlQueue';
-  await channel.assertQueue(controlQueue, { durable: true });
+  await channel.assertQueue(queue, { durable: true });
 
-  // Simulate sending a message to the ControlQueue
-  channel.sendToQueue(controlQueue, Buffer.from(JSON.stringify(message)), {
+  // Simulate sending a message to the queue
+  channel.sendToQueue(queue, Buffer.from(JSON.stringify(message)), {
     persistent: true
   });
 
-  console.log('Message sent to ControlQueue:', message);
+  console.log('Message sent:', message);
 
   setTimeout(() => {
     connection.close();
@@ -22,7 +21,12 @@ async function sendMessageToControlQueue(message) {
 
 // Simulate sending a message
 const message = {
-  scriptType: 'shodan',
+  scriptType: 'dnstwist',
   org: 'DHS'
 };
-sendMessageToControlQueue(message);
+const queue = 'dnstwistQueue';
+sendMessageToQueue(message, queue);
+sendMessageToQueue(message, queue);
+sendMessageToQueue(message, queue);
+sendMessageToQueue(message, queue);
+sendMessageToQueue(message, queue);
diff --git a/backend/src/tasks/ecs-client.ts b/backend/src/tasks/ecs-client.ts
index 7b9e806e..fdc62d58 100644
--- a/backend/src/tasks/ecs-client.ts
+++ b/backend/src/tasks/ecs-client.ts
@@ -81,7 +81,7 @@ class ECSClient {
           // In order to use the host name "db" to access the database from the
           // crossfeed-worker image, we must launch the Docker container with
           // the Crossfeed backend network.
-          NetworkMode: 'crossfeed_backend',
+          NetworkMode: 'xfd_backend',
           Memory: 4000000000 // Limit memory to 4 GB. We do this locally to better emulate fargate memory conditions. TODO: In the future, we could read the exact memory from SCAN_SCHEMA to better emulate memory requirements for each scan.
         },
         Env: [
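Reviewer note: sendMessage.js now fires five identical `sendMessageToQueue` calls back to back and relies on `setTimeout` to close the connection, which can race against unsent messages. A minimal TypeScript sketch of an alternative, assuming `amqplib`'s promise API; the `sendMessages` helper name and the message count are illustrative, not part of this PR:

```typescript
// Hypothetical sendMessages.ts -- a sketch, not part of this PR.
import * as amqp from 'amqplib';

async function sendMessages(
  queue: string,
  message: object,
  count: number
): Promise<void> {
  const connection = await amqp.connect('amqp://localhost');
  const channel = await connection.createChannel();
  await channel.assertQueue(queue, { durable: true });

  // Enqueue `count` copies of the message; persistent messages survive broker restarts.
  for (let i = 0; i < count; i++) {
    channel.sendToQueue(queue, Buffer.from(JSON.stringify(message)), {
      persistent: true
    });
  }
  console.log(`Sent ${count} message(s) to ${queue}`);

  // Close deterministically after the sends instead of via a timer.
  await channel.close();
  await connection.close();
}

sendMessages('dnstwistQueue', { scriptType: 'dnstwist', org: 'DHS' }, 5).catch(
  console.error
);
```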
diff --git a/backend/src/tasks/scanExecution.ts b/backend/src/tasks/scanExecution.ts
index 8da85ad5..a90bf12b 100644
--- a/backend/src/tasks/scanExecution.ts
+++ b/backend/src/tasks/scanExecution.ts
@@ -3,9 +3,11 @@ import * as AWS from 'aws-sdk';
 import { integer } from 'aws-sdk/clients/cloudfront';
 
 const ecs = new AWS.ECS();
-let docker;
+let docker: any;
+
 if (process.env.IS_LOCAL) {
-  docker = require('dockerode');
+  const Docker = require('dockerode');
+  docker = new Docker();
 }
 
 const toSnakeCase = (input) => input.replace(/ /g, '-');
@@ -90,7 +92,7 @@ async function startLocalContainers(
       // In order to use the host name "db" to access the database from the
       // crossfeed-worker image, we must launch the Docker container with
      // the Crossfeed backend network.
-      NetworkMode: 'crossfeed_backend',
+      NetworkMode: 'xfd_backend',
       Memory: 4000000000 // Limit memory to 4 GB. We do this locally to better emulate fargate memory conditions. TODO: In the future, we could read the exact memory from SCAN_SCHEMA to better emulate memory requirements for each scan.
     },
     Env: [
@@ -158,28 +160,24 @@ export const handler: Handler = async (event) => {
       process.env.SHODAN_QUEUE_URL!
     );
   } else if (scanType === 'dnstwist') {
-    desiredCount = 30;
     await startDesiredTasks(
       scanType,
       desiredCount,
       process.env.DNSTWIST_QUEUE_URL!
     );
   } else if (scanType === 'hibp') {
-    desiredCount = 20;
     await startDesiredTasks(
       scanType,
       desiredCount,
       process.env.HIBP_QUEUE_URL!
     );
   } else if (scanType === 'intelx') {
-    desiredCount = 10;
     await startDesiredTasks(
       scanType,
       desiredCount,
       process.env.INTELX_QUEUE_URL!
     );
   } else if (scanType === 'cybersixgill') {
-    desiredCount = 10;
     await startDesiredTasks(
       scanType,
       desiredCount,
diff --git a/backend/src/tools/consumeControlQueue.ts b/backend/src/tools/consumeControlQueue.ts
deleted file mode 100644
index 87289464..00000000
--- a/backend/src/tools/consumeControlQueue.ts
+++ /dev/null
@@ -1,42 +0,0 @@
-// Script to setup Control Queue locally so when messages are sent to it,
-// the scanExecution lambda is triggered
-import { handler as scanExecution } from '../tasks/scanExecution';
-const amqp = require('amqplib');
-import * as dotenv from 'dotenv';
-import * as path from 'path';
-
-async function consumeControlQueue() {
-  // Load the environment variables from the .env file
-  const envPath = path.resolve(__dirname, '../../.env');
-  dotenv.config({ path: envPath });
-  console.log(process.env.SHODAN_QUEUE_URL);
-
-  // Connect to RabbitMQ
-  const connection = await amqp.connect('amqp://rabbitmq');
-  const channel = await connection.createChannel();
-  const controlQueue = 'ControlQueue';
-
-  await channel.assertQueue(controlQueue, { durable: true });
-
-  console.log('Waiting for messages from ControlQueue...');
-
-  channel.consume(controlQueue, (message) => {
-    if (message !== null) {
-      const payload = JSON.parse(message.content.toString());
-
-      // Trigger your local Lambda function here
-      console.log('Received message:', payload);
-
-      // Call scanExecution with the payload from message
-      scanExecution(
-        { Records: [{ body: JSON.stringify(payload) }] },
-        {} as any,
-        () => null
-      );
-
-      channel.ack(message);
-    }
-  });
-}
-
-consumeControlQueue();
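Reviewer note: with the hardcoded `desiredCount` overrides removed, the handler's if/else branches differ only in which queue URL they pass to `startDesiredTasks`. A table-driven dispatch would make that explicit. A sketch only, not part of this PR; the environment variable names for shodan, dnstwist, hibp, and intelx appear in the diff, while `CYBERSIXGILL_QUEUE_URL` is an assumption since that branch's URL falls outside the hunk:

```typescript
// Stub standing in for the existing startDesiredTasks in scanExecution.ts.
declare function startDesiredTasks(
  scanType: string,
  desiredCount: number,
  queueUrl: string
): Promise<void>;

// Map scan types to their queue URLs; replaces the repeated if/else branches.
const QUEUE_URLS: Record<string, string | undefined> = {
  shodan: process.env.SHODAN_QUEUE_URL,
  dnstwist: process.env.DNSTWIST_QUEUE_URL,
  hibp: process.env.HIBP_QUEUE_URL,
  intelx: process.env.INTELX_QUEUE_URL,
  cybersixgill: process.env.CYBERSIXGILL_QUEUE_URL // assumed variable name
};

async function dispatch(scanType: string, desiredCount: number): Promise<void> {
  const queueUrl = QUEUE_URLS[scanType];
  if (!queueUrl) {
    throw new Error(`Unknown scan type or missing queue URL: ${scanType}`);
  }
  await startDesiredTasks(scanType, desiredCount, queueUrl);
}
```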
diff --git a/backend/src/tools/run-scanExecution.ts b/backend/src/tools/run-scanExecution.ts
new file mode 100644
index 00000000..8e0fccab
--- /dev/null
+++ b/backend/src/tools/run-scanExecution.ts
@@ -0,0 +1,10 @@
+// Script to execute the scanExecution function
+import { handler as scanExecution } from '../tasks/scanExecution';
+
+async function localScanExecution() {
+  console.log('Starting...');
+  const payload = { scanType: 'dnstwist', desiredCount: 3 };
+  scanExecution(payload, {} as any, () => null);
+}
+
+localScanExecution();
diff --git a/docs/src/documentation-pages/dev/quickstart.md b/docs/src/documentation-pages/dev/quickstart.md
index c6744d18..4af374cc 100644
--- a/docs/src/documentation-pages/dev/quickstart.md
+++ b/docs/src/documentation-pages/dev/quickstart.md
@@ -62,22 +62,23 @@ This quickstart describes the initial setup required to run an instance of Cross
    npm run pesyncdb
    ```
 
-4. Start the RabbitMQ listener. This will listen for any messages sent to the queue and
-   trigger the scanExecution.ts function. This will stay running with this message: "Waiting for messages from ControlQueue..."
+4. Send messages to the RabbitMQ queue. First, edit backend/sendMessage.js to set the desired
+   scan and organization. Then run:
 
    ```bash
   cd backend
-   npm run control-queue
+   npm run send-message
    ```
 
-5. Run sendMessage.js to send a sample message to the queue. Feel free to edit this file
-   while testing.
+5. Invoke scans by running the command below. You can edit backend/src/tools/run-scanExecution.ts to set the desired scan type.
 
    ```bash
    cd backend
-   node sendMessage.js
+   npm run scan-exec
    ```
 
+6. Observe the logs in the Docker containers.
+
 ### Running tests
 
 To run tests, first make sure you have already started Crossfeed with `npm start` (or, at bare minimum, that the database container is running). Then run:
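One caveat worth flagging: the deleted consumeControlQueue.ts invoked the handler with an SQS-shaped event (`{ Records: [{ body: ... }] }`), while the new run-scanExecution.ts passes the payload object directly. If the handler still parses `Records`, the local runner would need to wrap the payload. A hedged sketch of that variant, not part of this PR:

```typescript
// Hypothetical variant of run-scanExecution.ts -- only needed if the handler
// expects SQS-shaped events, as the deleted consumeControlQueue.ts suggests.
import { handler as scanExecution } from '../tasks/scanExecution';

async function localScanExecution(): Promise<void> {
  console.log('Starting...');
  const payload = { scanType: 'dnstwist', desiredCount: 3 };
  await scanExecution(
    { Records: [{ body: JSON.stringify(payload) }] }, // SQS-style envelope
    {} as any,
    () => null
  );
}

localScanExecution().catch(console.error);
```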