diff --git a/Makefile b/Makefile index 46af27b..1068254 100644 --- a/Makefile +++ b/Makefile @@ -119,7 +119,7 @@ fmt: docker init-%: docker $(TERRASPACE) init $* -install: docker +install: docker zip_lambdas $(DOCKER_RUN) $(IMAGE) -ic "YARN_SILENT=1 yarn install --ignore-optional && YARN_SILENT=1 yarn --cwd scripts install" ## logs: Shows last 10 lines of all Terraspace logs @@ -187,3 +187,7 @@ update-launchpad: ## validate-STACK: Runs `terraform validate` for specified STACK validate-%: docker $(TERRASPACE) validate $* + +## Zip any lambda functions to prepare for deployment +zip_lambdas: + sh app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/zip_lambda.sh

diff --git a/app/stacks/post-deploy-mods/main.tf b/app/stacks/post-deploy-mods/main.tf new file mode 100644 index 0000000..e8c020b --- /dev/null +++ b/app/stacks/post-deploy-mods/main.tf @@ -0,0 +1,119 @@ +# main.tf for post-deploy-mods + +# Define the Lambda Function +resource "aws_lambda_function" "pre_filter_DistributionApiEndpoints" { + # function_name = "ks-test-pre-filter-DistributionApiEndpoints" + function_name = "${var.prefix}-pre-filter-DistributionApiEndpoints" + filename = "${path.module}/resources/lambdas/pre-filter-DistributionApiEndpoints/distro/lambda.zip" + role = aws_iam_role.lambda_exec_pre_filter_DistributionApiEndpoints.arn + handler = "lambda_function.lambda_handler" # Python handlers are module.function; the zip contains lambda_function.py, which defines lambda_handler + runtime = "python3.10" #local.lambda_runtime + timeout = 300 + memory_size = 3008 + + source_code_hash = filebase64sha256("${path.module}/resources/lambdas/pre-filter-DistributionApiEndpoints/distro/lambda.zip") +} + +# Define the Execution Role and Policy +resource "aws_iam_role" "lambda_exec_pre_filter_DistributionApiEndpoints" { + #name = "lambda_exec_role_pre_filter_DistributionApiEndpoints" + name = "${var.prefix}-lambda_exe_role_pf_DistApiEndpoints" # Must be 64 chars or less + + assume_role_policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Action = "sts:AssumeRole" + Effect = "Allow" + Sid = "" + Principal = { + Service = "lambda.amazonaws.com" + } + }, + ] + }) +} + +# Define an attachment to the aws_iam_role above +resource "aws_iam_role_policy_attachment" "lambda_exec_policy" { + role = aws_iam_role.lambda_exec_pre_filter_DistributionApiEndpoints.name + policy_arn = "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole" +} + +# Define another policy to allow invoking another lambda +resource "aws_iam_policy" "lambda_invoke_policy" { + #name = "lambda_invoke_policy" + name = "${var.prefix}-lambda_invoke_policy" + description = "Policy to allow Lambda functions to invoke other Lambda functions" + policy = jsonencode({ + Version = "2012-10-17" + Statement = [ + { + Effect = "Allow" + Action = [ + "lambda:InvokeFunction" + ] + Resource = "*" + } + ] + }) +} + +# Attach the Policy, which allows a Lambda to be Invoked, to the Lambda Role +resource "aws_iam_role_policy_attachment" "lambda_invoke_policy_attachment" { + role = aws_iam_role.lambda_exec_pre_filter_DistributionApiEndpoints.name + policy_arn = aws_iam_policy.lambda_invoke_policy.arn +} + +# Fetch the existing API Gateway +data "aws_api_gateway_rest_api" "distribution_api" { + name = "${var.prefix}-distribution" # Example "cumulus-uat-distribution" +} + +# Fetch the proxy resource (the API Gateway "/{proxy+}" path) +data "aws_api_gateway_resource" "proxy_resource" { + rest_api_id = data.aws_api_gateway_rest_api.distribution_api.id + path = "/{proxy+}" +} + +# No need to update the root resource +# The
way this is all set up, we only want to override where the file is downloaded +# That happens only when the proxy is invoked +# +# # If we need to update the root resource, then uncomment this code +# Fetch the root resource (the API Gateway "/" path) +# +#data "aws_api_gateway_resource" "root_resource" { +# rest_api_id = data.aws_api_gateway_rest_api.distribution_api.id +# path = "/" +#} +# +# +## Update the integration for the root resource with GET method +#resource "aws_api_gateway_integration" "root_lambda_integration" { +# rest_api_id = data.aws_api_gateway_rest_api.distribution_api.id +# resource_id = data.aws_api_gateway_resource.root_resource.id +# http_method = "GET" +# integration_http_method = "POST" #"GET" +# type = "AWS_PROXY" +# uri = aws_lambda_function.pre_filter_DistributionApiEndpoints.invoke_arn +#} + +# Update the integration for the proxy resource with the ANY method +resource "aws_api_gateway_integration" "proxy_lambda_integration" { + rest_api_id = data.aws_api_gateway_rest_api.distribution_api.id + resource_id = data.aws_api_gateway_resource.proxy_resource.id + http_method = "ANY" + integration_http_method = "POST" #"GET" + type = "AWS_PROXY" + uri = aws_lambda_function.pre_filter_DistributionApiEndpoints.invoke_arn +} + +# Ensure the Lambda function has the necessary permissions to be invoked by API Gateway +resource "aws_lambda_permission" "api_gateway" { + statement_id = "AllowAPIGatewayInvoke" + action = "lambda:InvokeFunction" + function_name = aws_lambda_function.pre_filter_DistributionApiEndpoints.function_name + principal = "apigateway.amazonaws.com" + source_arn = "${data.aws_api_gateway_rest_api.distribution_api.execution_arn}/*/*" +}

diff --git a/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/distro/lambda.zip b/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/distro/lambda.zip new file mode 100644 index 0000000..ea0607d Binary files /dev/null and b/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/distro/lambda.zip differ

diff --git a/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/distro/lambda_function.py b/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/distro/lambda_function.py new file mode 100644 index 0000000..0e8ef26 --- /dev/null +++ b/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/distro/lambda_function.py @@ -0,0 +1,367 @@ +# Imports Section +import json + +# For catching generic errors +import sys + +# To call another lambda from this lambda +import boto3 + +# SETTINGS +# +# This function's name (for logging purposes) +#this_function_name = "cumulus-prod-pre-filter-DistributionApiEndpoints" +this_function_name = "cumulus-kris-sbx7894-pre-filter-DistributionApiEndpoints" + +# +# If this is set to False, this function does nothing more than the original lambda did; it just passes the request through +# If this is set to True, this function makes the normal request, then attempts to detect a request for a file; if this IS a file request, it checks EULA permissions for the current user +is_post_EULA_filter_enabled = True # True # False +# +# Which DynamoDB table holds the Access Tokens after a successful authentication?
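+# Based on the lookup in get_eulaAcceptances_from_token_from_dyanmoDB() below, each item in +# this table is assumed to hold an 'accessToken' key plus a 'tokenInfo' map whose +# 'eulaAcceptances' value is a comma-separated vendor string, e.g. (illustrative values only): +#   {'accessToken': '<token>', 'tokenInfo': {'eulaAcceptances': 'Maxar, Planet'}}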
+#dynamo_db__table_name = 'cumulus-prod-DistributionAccessTokensTable' +dynamo_db__table_name = 'cumulus-kris-sbx7894-DistributionAccessTokensTable' + +# +# All Possible Dataset directories by Vendor - When a new vendor or dataset is added, add to this list. +vendor_to_dataset_map = { + 'planet': + [ + 'planet', + 'PSScene3Band___1' + ], + 'maxar': + [ + 'WV04_MSI_L1B___1', 'WV04_Pan_L1B___1', + 'WV03_MSI_L1B___1', 'WV03_Pan_L1B___1', + 'WV02_MSI_L1B___1', 'WV02_Pan_L1B___1', + 'WV01_MSI_L1B___1', 'WV01_Pan_L1B___1', + 'GE01_MSI_L1B___1', 'GE01_Pan_L1B___1' + ] +} + +# Testing a new Vendor 'testvendor' +# vendor_to_dataset_map = { +# 'planet': +# [ +# 'planet', +# 'PSScene3Band___1' +# ], +# 'maxar': +# [ +# 'WV04_MSI_L1B___1', 'WV04_Pan_L1B___1', +# 'WV03_MSI_L1B___1', 'WV03_Pan_L1B___1', +# 'WV02_MSI_L1B___1', 'WV02_Pan_L1B___1', +# 'WV01_MSI_L1B___1', 'WV01_Pan_L1B___1', +# 'GE01_MSI_L1B___1' +# ], +# 'testvendor': +# [ +# 'GE01_Pan_L1B___1' +# ] +# } + + +# In Code Docs - Function Process. +# +# (1) Make the normal request (call the other lambda and hold on to its return value) +# (2) If is_post_EULA_filter_enabled is set to False, let the original function's output pass right through +# (3) If is_post_EULA_filter_enabled is set to True, then we check the request and grant or deny permissions based on the request and info obtained from the user's cognito properties and the dataset map +# (3a) If the user's request is not valid, we return an error message that simply says, "insufficient permissions" +# (3b) If the user's request is valid, a file download should start, just the same way as it used to before this filter was installed. + + +# Main Lambda Handler +def lambda_handler(event, context): + print(f'{this_function_name}: STARTED') + # + print(f' input param: event: {event}') + print(f' input param: context: {context}') + # + print(f'is_post_EULA_filter_enabled is set to {is_post_EULA_filter_enabled}') + + # Make the normal request first. + + # Default event to return + ret_event = {'statusCode': 200, 'body': json.dumps('Default')} + + # Try/except for catching generic errors + has_error = False + error_msg = '' + try: + # Create the boto3 client + client = boto3.client('lambda') + + # Call the Original Lambda here - Passing in the original event parameter + # + # Important Note: This requires the execution role to have permission to invoke the original Lambda + # # + # # FunctionName='arn:aws:lambda:us-west-2:410469285047:function:cumulus-prod-DistributionApiEndpoints', + response__From_Original_Lambda = client.invoke( + FunctionName='arn:aws:lambda:us-west-2:410469285047:function:cumulus-kris-sbx7894-DistributionApiEndpoints', + InvocationType='RequestResponse', + Payload=json.dumps(event) + ) + + # Overwrite the original return - This sends the output DIRECTLY back through the API Gateway return mechanism without modifying it. + ret_event = json.loads(response__From_Original_Lambda['Payload'].read()) + + except Exception: + sysErrorData = str(sys.exc_info()) + has_error = True + error_msg = f'Error invoking Original Lambda. Sys Error Info: {sysErrorData}' + + # If an error occurred when calling the original Lambda, we must exit right away. + if (has_error == True): + log_error = {'statusCode': 200, 'body': json.dumps( + f'{this_function_name}: There was an error calling the other function.
(error_msg): {error_msg}')} + print(f'log_error: {log_error}') + ret_event = {'statusCode': 200, 'body': json.dumps('An error occurred.')} + else: + # This is the point in the code where we determine how to check for EULA + if (is_post_EULA_filter_enabled == True): + print(f'The post EULA filter was turned ON. Proceeding with the next checkpoints...') + + print(f'Now checking to see if this is a specific file request by a logged-in user.') + # is_logged_in_and_specific_s3_file_request = False + try: + + # This is the path to the file the user is requesting. + # Example1: "/csdap-cumulus-prod-protected/planet/PSScene3Band/20190603_235523_0f4c_thumb" + # Example2: "/csdap-cumulus-uat-protected/WV02_Pan_L1B___1/2009/364/WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005/WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005.ntf" + requested_path_str = event['path'] + print(f'requested_path_str: {requested_path_str}') + + # TODO - Check the path object for a specific kind of structure that identifies it?? Or maybe check another property for S3? + + # Has an accessToken (which means they are logged in) + headers_Cookie_obj = event['headers']['Cookie'] + print(f'headers_Cookie_obj: {headers_Cookie_obj}') + + # Getting the access Token (to be used to retrieve the correct record from DynamoDB) + accessToken = headers_Cookie_obj.split("accessToken=")[1].split(";")[0] # IF the user is logged in, this accessToken will be in an expected location. + print(f'accessToken from request: {accessToken}') + + # Get the Eula Acceptance String from DynamoDB + # Note: if this function fails, a blank str is returned, which later gets denied access. + current_user__eula_acceptance_str = get_eulaAcceptances_from_token_from_dyanmoDB(accessToken=accessToken) + print(f'Confirmed current_user__eula_acceptance_str: {current_user__eula_acceptance_str}') + + # Convert the User's EULA Acceptance String into an array of strings + current_user__eula_acceptance_vendor_list = convert_vendor_string_into_vendor_list(input_string=current_user__eula_acceptance_str) + print(f'Confirmed current_user__eula_acceptance_vendor_list: {current_user__eula_acceptance_vendor_list}') + + # Get the Vendor's Dataset Directory that the user was trying to request + current_user__requested_dataset_directory_name = convert_requested_path_to_dataset_dir_name(input_path=requested_path_str) + print(f'current_user__requested_dataset_directory_name: {current_user__requested_dataset_directory_name}') + + # Finally, get the decision of whether this user has access or not. + # current_user_has_vendor_access = does_dataset_dir_have_vendor_access(dataset_dir_name="", vendor_access_list=[], vendor_to_dataset_map={}) + current_user_has_vendor_access = does_dataset_dir_have_vendor_access( + dataset_dir_name=current_user__requested_dataset_directory_name, + vendor_access_list=current_user__eula_acceptance_vendor_list, + vendor_to_dataset_map=vendor_to_dataset_map) + print(f'current_user_has_vendor_access: {current_user_has_vendor_access}') + + if (current_user_has_vendor_access == True): + # Do nothing here; if this was a file request and we passed all these checks, the file download should start for them. + print(f'User DOES have access to the requested dataset.') + + else: + print(f'User does NOT have access to the requested dataset. Sending back an error message to the user.') + + # Return Status code on error.
+ # Note: 200 means all is ok; should we pass back an HTTP error code along with the message (because they don't have permission)? + # or are we passing back a 200 because the function executed correctly and the webpage technically is doing what it is supposed to? + statusCode = 200 + + msg_to_user = f'"insufficient permissions"' + # ret_event = {'statusCode': 200, 'body': json.dumps('Hello from Lambda!: cumulus-uat-pre-filter-DistributionApiEndpoints') } + ret_event = {'statusCode': statusCode, 'body': json.dumps(f'{msg_to_user}')} + + + except Exception: + sysErrorData = str(sys.exc_info()) + error_msg = f'This is likely NOT a specific S3 file request from a logged-in user. In case this is an error, here is the system "except:" info. Sys Error Info: {sysErrorData}' + print(error_msg) + + # TODO - Write that function to detect the type of request + + else: + print(f'The post EULA filter was turned OFF. Passing the event back to the original requester.') + + # END OF: if (has_error == True): else: + + # return { + # 'statusCode': 200, + # 'body': json.dumps('Hello from Lambda!') + # } + + # DEBUG AND TESTING!! + # ret_event = {'statusCode': 200, 'body': json.dumps(f'THIS PART OF THE APPLICATION IS RUNNING IN TEST MODE RIGHT NOW. -- if you still see this message after retrying a few hours later, please contact Kris!') } + # + print(f'{this_function_name}: Reached the End! Returning now.') + + # Return the event + return ret_event + + +# Retrieve the current user's EULA Acceptance from DynamoDB using their AccessToken. +# # The AccessToken is passed into the request after a user successfully logs in. +# # The user's record gets processed and stored into local DynamoDB by the original lambda function which gets called BEFORE this one. +# +# get_eulaAcceptances_from_token_from_dyanmoDB(accessToken=accessToken) +def get_eulaAcceptances_from_token_from_dyanmoDB(accessToken="UNSET"): + # Default as blank, which means no access. + ret_eulaAcceptance_Str = '' + + print(f'get_eulaAcceptances_from_token_from_dyanmoDB: Input accessToken: {accessToken}') + + try: + # Initialize a session using Amazon DynamoDB + dynamodb = boto3.resource('dynamodb') + + # Get the table object + table = dynamodb.Table(dynamo_db__table_name) + + # Perform a scan to get all items + response = table.scan() + items = response.get('Items', []) + + if not items: + no_items_error = f'There was an error when getting items from the dynamodb table. The Items from response.get("Items", []) was empty.' + print(no_items_error) + + row_counter = 0 + for item in items: + # print(f'Current Row: {row_counter}: item: {item}') + item__accessToken = item['accessToken'] + item__tokenInfo__eulaAcceptances = item['tokenInfo']['eulaAcceptances'] + if (item__accessToken == accessToken): + print(f'Found a matching access token. Now taking property tokenInfo.eulaAcceptances: {item__tokenInfo__eulaAcceptances}') + ret_eulaAcceptance_Str = str(item__tokenInfo__eulaAcceptances) + row_counter = row_counter + 1 + + + except Exception: + sysErrorData = str(sys.exc_info()) + error_msg = f'Error getting eula Acceptances from DynamoDB. Sys Error Info: {sysErrorData}' + print(error_msg) + ret_eulaAcceptance_Str = '' + + return ret_eulaAcceptance_Str + + +# Function to convert a string like this: "Vendor One, Vendor Two" into an array like this: ["vendorone", "vendortwo"] +def convert_vendor_string_into_vendor_list(input_string=""): + ret_array = [] + + # Validation - Check if the input string is empty; if so, return an empty list, which will fail the access checkpoint later.
+ if not input_string.strip(): + return [] + + # Split the string by comma, remove internal spaces, strip extra spaces, and convert each element to lowercase + # ret_array = [element.replace(" ", "").strip().lower() for element in input_string.split(',') if element.strip()] + + # Split the string by comma and iterate over each element + for element in input_string.split(','): + # Check if the element is not just whitespace + if element.strip(): + # Remove internal spaces, strip trailing spaces, convert to lowercase + cleaned_element = element.replace(" ", "").strip().lower() + ret_array.append(cleaned_element) + + return ret_array + + +# Convert the user-requested path into a dataset directory name. +# +# # Convert this: +# "/csdap-cumulus-uat-protected/WV02_Pan_L1B___1/2009/364/WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005/WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005.ntf" +# # To This: +# "WV02_Pan_L1B___1" +# +# At this time, the parent dataset directory is always the second directory. +# # For MAXAR, there are multiple dataset directories at this level -- Example Items look like this: "WV02_Pan_L1B___1" and "GE01_MSI_L1B___1" +# # For Planet, there is a vendor-level directory at this level, so it is a bit simpler -- Example Item looks like this: "planet" +def convert_requested_path_to_dataset_dir_name(input_path=""): + # Default return to blank string. + ret_dataset_dir_name = "" + + # Validation - Check to see if the input was an empty string. If it is, then return another empty string (which will later fail the access checkpoint.) + if (input_path == ""): + return "" + + # Split the input path by a forward slash + path_parts = input_path.split('/') + print(f'Split path_parts: {path_parts}') # Example: ['', 'csdap-cumulus-uat-protected', 'WV02_Pan_L1B___1', '2009', '364', 'WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005', 'WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005.ntf'] + + # Get the element at position 2 (third item in the directory path) -- remember, the first item is a blank string. + ret_dataset_dir_name = path_parts[2] # input_path.split('/')[2] + + # Return the result + return ret_dataset_dir_name + + +# Check the parts of the 'vendor_to_dataset_map' (from settings) that are in the 'vendor_access_list' (from the user) to see if this 'dataset_dir_name' (from the user) has access or not. +def does_dataset_dir_have_vendor_access(dataset_dir_name="", vendor_access_list=[], + vendor_to_dataset_map={}): + # Always default access to False + ret_has_access = False + + # Validation Section - If any of the inputs are blank, then deny access right here. + # + # If a blank string made its way in here, deny the access (return False) + if (dataset_dir_name == ""): + print(f'has_access: input: dataset_dir_name was a blank string. ""') + return False + # + if (vendor_access_list == []): + print(f'has_access: input: vendor_access_list was an empty list [].') + return False + # + # If the configuration is broken somehow (blank object) // "if not vendor_to_dataset_map:" is True if vendor_to_dataset_map is ONLY defined by {} (blank object) + if not vendor_to_dataset_map: + print("has_access: input: vendor_to_dataset_map was an empty object {}.") + return False + + # Get the keys to the vendor to dataset map + vendor_to_dataset_map__keys = vendor_to_dataset_map.keys() + print(f'has_access: vendor_to_dataset_map__keys {vendor_to_dataset_map__keys}') + + # Now use the vendor_access_list to check parts of the map and see if any of the directory paths match. + for vendor_access_item in vendor_access_list: + + # Check the map, but ONLY if the current vendor name is in the keys of the map + # # This part is critical; this is how we will NOT give access to a directory that is not included in the vendor access list, which comes directly from the EULA Acceptance String + if (vendor_access_item in vendor_to_dataset_map__keys): + current_vendor_dirs_to_check = vendor_to_dataset_map[vendor_access_item] + + # Now iterate all the directories found in the part of the vendor map that is for this current vendor, which was in the vendor access list. + for current_vendor_dir in current_vendor_dirs_to_check: + if (current_vendor_dir == dataset_dir_name): + ret_has_access = True + print(f'has_access: Found a match: {current_vendor_dir} in the map matches the requested directory: {dataset_dir_name}') + else: + # The only condition when this block should get hit is if there is a vendor listed on the user's EULA Acceptance List that does not currently exist in the vendor_to_dataset_map. It might be a new vendor, an error, or a misspelled word. + print(f'has_access: Warning: Vendor {vendor_access_item} was passed in but not found in the (vendor_to_dataset_map__keys): {vendor_to_dataset_map__keys}. Is this a new vendor that needs to be added? This might be an error; check the spelling of the vendor as it appears on the EULA Acceptance string') + + # Return the decision. + return ret_has_access + +# END OF LAMBDA

diff --git a/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/src/lambda_function.py b/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/src/lambda_function.py new file mode 100644 index 0000000..3636ba8 --- /dev/null +++ b/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/src/lambda_function.py @@ -0,0 +1,367 @@ +# Imports Section +import json + +# For catching generic errors +import sys + +# To call another lambda from this lambda +import boto3 + +# SETTINGS +# +# This function's name (for logging purposes) +#this_function_name = "cumulus-prod-pre-filter-DistributionApiEndpoints" +this_function_name = "ENV_VAR__CUMULUS_PREFIX-pre-filter-DistributionApiEndpoints" + +# +# If this is set to False, this function does nothing more than the original lambda did; it just passes the request through +# If this is set to True, this function makes the normal request, then attempts to detect a request for a file; if this IS a file request, it checks EULA permissions for the current user +is_post_EULA_filter_enabled = True # True # False +# +# Which DynamoDB table holds the Access Tokens after a successful authentication?
+#dynamo_db__table_name = 'cumulus-prod-DistributionAccessTokensTable' +dynamo_db__table_name = 'ENV_VAR__CUMULUS_PREFIX-DistributionAccessTokensTable' + +# +# All Possible Dataset directories by Vendor - When a new vendor or dataset is added, add to this list. +vendor_to_dataset_map = { + 'planet': + [ + 'planet', + 'PSScene3Band___1' + ], + 'maxar': + [ + 'WV04_MSI_L1B___1', 'WV04_Pan_L1B___1', + 'WV03_MSI_L1B___1', 'WV03_Pan_L1B___1', + 'WV02_MSI_L1B___1', 'WV02_Pan_L1B___1', + 'WV01_MSI_L1B___1', 'WV01_Pan_L1B___1', + 'GE01_MSI_L1B___1', 'GE01_Pan_L1B___1' + ] +} + +# Testing a new Vendor 'testvendor' +# vendor_to_dataset_map = { +# 'planet': +# [ +# 'planet', +# 'PSScene3Band___1' +# ], +# 'maxar': +# [ +# 'WV04_MSI_L1B___1', 'WV04_Pan_L1B___1', +# 'WV03_MSI_L1B___1', 'WV03_Pan_L1B___1', +# 'WV02_MSI_L1B___1', 'WV02_Pan_L1B___1', +# 'WV01_MSI_L1B___1', 'WV01_Pan_L1B___1', +# 'GE01_MSI_L1B___1' +# ], +# 'testvendor': +# [ +# 'GE01_Pan_L1B___1' +# ] +# } + + +# In Code Docs - Function Process. +# +# (1) Make the normal request (call the other lambda and hold on to its return value) +# (2) If is_post_EULA_filter_enabled is set to False, let the original function's output pass right through +# (3) If is_post_EULA_filter_enabled is set to True, then we check the request and grant or deny permissions based on the request and info obtained from the user's cognito properties and the dataset map +# (3a) If the user's request is not valid, we return an error message that simply says, "insufficient permissions" +# (3b) If the user's request is valid, a file download should start, just the same way as it used to before this filter was installed. + + +# Main Lambda Handler +def lambda_handler(event, context): + print(f'{this_function_name}: STARTED') + # + print(f' input param: event: {event}') + print(f' input param: context: {context}') + # + print(f'is_post_EULA_filter_enabled is set to {is_post_EULA_filter_enabled}') + + # Make the normal request first. + + # Default event to return + ret_event = {'statusCode': 200, 'body': json.dumps('Default')} + + # Try/except for catching generic errors + has_error = False + error_msg = '' + try: + # Create the boto3 client + client = boto3.client('lambda') + + # Call the Original Lambda here - Passing in the original event parameter + # + # Important Note: This requires the execution role to have permission to invoke the original Lambda + # # + # # FunctionName='arn:aws:lambda:us-west-2:410469285047:function:cumulus-prod-DistributionApiEndpoints', + response__From_Original_Lambda = client.invoke( + FunctionName='arn:aws:lambda:us-west-2:410469285047:function:ENV_VAR__CUMULUS_PREFIX-DistributionApiEndpoints', + InvocationType='RequestResponse', + Payload=json.dumps(event) + ) + + # Overwrite the original return - This sends the output DIRECTLY back through the API Gateway return mechanism without modifying it. + ret_event = json.loads(response__From_Original_Lambda['Payload'].read()) + + except Exception: + sysErrorData = str(sys.exc_info()) + has_error = True + error_msg = f'Error invoking Original Lambda. Sys Error Info: {sysErrorData}' + + # If an error occurred when calling the original Lambda, we must exit right away. + if (has_error == True): + log_error = {'statusCode': 200, 'body': json.dumps( + f'{this_function_name}: There was an error calling the other function.
(error_msg): {error_msg}')} + print(f'log_error: {log_error}') + ret_event = {'statusCode': 200, 'body': json.dumps('An error occurred.')} + else: + # This is the point in the code where we determine how to check for EULA + if (is_post_EULA_filter_enabled == True): + print(f'The post EULA filter was turned ON. Proceeding with the next checkpoints...') + + print(f'Now checking to see if this is a specific file request by a logged-in user.') + # is_logged_in_and_specific_s3_file_request = False + try: + + # This is the path to the file the user is requesting. + # Example1: "/csdap-cumulus-prod-protected/planet/PSScene3Band/20190603_235523_0f4c_thumb" + # Example2: "/csdap-cumulus-uat-protected/WV02_Pan_L1B___1/2009/364/WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005/WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005.ntf" + requested_path_str = event['path'] + print(f'requested_path_str: {requested_path_str}') + + # TODO - Check the path object for a specific kind of structure that identifies it?? Or maybe check another property for S3? + + # Has an accessToken (which means they are logged in) + headers_Cookie_obj = event['headers']['Cookie'] + print(f'headers_Cookie_obj: {headers_Cookie_obj}') + + # Getting the access Token (to be used to retrieve the correct record from DynamoDB) + accessToken = headers_Cookie_obj.split("accessToken=")[1].split(";")[0] # IF the user is logged in, this accessToken will be in an expected location. + print(f'accessToken from request: {accessToken}') + + # Get the Eula Acceptance String from DynamoDB + # Note: if this function fails, a blank str is returned, which later gets denied access. + current_user__eula_acceptance_str = get_eulaAcceptances_from_token_from_dyanmoDB(accessToken=accessToken) + print(f'Confirmed current_user__eula_acceptance_str: {current_user__eula_acceptance_str}') + + # Convert the User's EULA Acceptance String into an array of strings + current_user__eula_acceptance_vendor_list = convert_vendor_string_into_vendor_list(input_string=current_user__eula_acceptance_str) + print(f'Confirmed current_user__eula_acceptance_vendor_list: {current_user__eula_acceptance_vendor_list}') + + # Get the Vendor's Dataset Directory that the user was trying to request + current_user__requested_dataset_directory_name = convert_requested_path_to_dataset_dir_name(input_path=requested_path_str) + print(f'current_user__requested_dataset_directory_name: {current_user__requested_dataset_directory_name}') + + # Finally, get the decision of whether this user has access or not. + # current_user_has_vendor_access = does_dataset_dir_have_vendor_access(dataset_dir_name="", vendor_access_list=[], vendor_to_dataset_map={}) + current_user_has_vendor_access = does_dataset_dir_have_vendor_access( + dataset_dir_name=current_user__requested_dataset_directory_name, + vendor_access_list=current_user__eula_acceptance_vendor_list, + vendor_to_dataset_map=vendor_to_dataset_map) + print(f'current_user_has_vendor_access: {current_user_has_vendor_access}') + + if (current_user_has_vendor_access == True): + # Do nothing here; if this was a file request and we passed all these checks, the file download should start for them. + print(f'User DOES have access to the requested dataset.') + + else: + print(f'User does NOT have access to the requested dataset. Sending back an error message to the user.') + + # Return Status code on error.
+ # Note: 200 means all is ok; should we pass back an HTTP error code along with the message (because they don't have permission)? + # or are we passing back a 200 because the function executed correctly and the webpage technically is doing what it is supposed to? + statusCode = 200 + + msg_to_user = f'"insufficient permissions"' + # ret_event = {'statusCode': 200, 'body': json.dumps('Hello from Lambda!: cumulus-uat-pre-filter-DistributionApiEndpoints') } + ret_event = {'statusCode': statusCode, 'body': json.dumps(f'{msg_to_user}')} + + + except Exception: + sysErrorData = str(sys.exc_info()) + error_msg = f'This is likely NOT a specific S3 file request from a logged-in user. In case this is an error, here is the system "except:" info. Sys Error Info: {sysErrorData}' + print(error_msg) + + # TODO - Write that function to detect the type of request + + else: + print(f'The post EULA filter was turned OFF. Passing the event back to the original requester.') + + # END OF: if (has_error == True): else: + + # return { + # 'statusCode': 200, + # 'body': json.dumps('Hello from Lambda!') + # } + + # DEBUG AND TESTING!! + # ret_event = {'statusCode': 200, 'body': json.dumps(f'THIS PART OF THE APPLICATION IS RUNNING IN TEST MODE RIGHT NOW. -- if you still see this message after retrying a few hours later, please contact Kris!') } + # + print(f'{this_function_name}: Reached the End! Returning now.') + + # Return the event + return ret_event + + +# Retrieve the current user's EULA Acceptance from DynamoDB using their AccessToken. +# # The AccessToken is passed into the request after a user successfully logs in. +# # The user's record gets processed and stored into local DynamoDB by the original lambda function which gets called BEFORE this one. +# +# get_eulaAcceptances_from_token_from_dyanmoDB(accessToken=accessToken) +def get_eulaAcceptances_from_token_from_dyanmoDB(accessToken="UNSET"): + # Default as blank, which means no access. + ret_eulaAcceptance_Str = '' + + print(f'get_eulaAcceptances_from_token_from_dyanmoDB: Input accessToken: {accessToken}') + + try: + # Initialize a session using Amazon DynamoDB + dynamodb = boto3.resource('dynamodb') + + # Get the table object + table = dynamodb.Table(dynamo_db__table_name) + + # Perform a scan to get all items + response = table.scan() + items = response.get('Items', []) + + if not items: + no_items_error = f'There was an error when getting items from the dynamodb table. The Items from response.get("Items", []) was empty.' + print(no_items_error) + + row_counter = 0 + for item in items: + # print(f'Current Row: {row_counter}: item: {item}') + item__accessToken = item['accessToken'] + item__tokenInfo__eulaAcceptances = item['tokenInfo']['eulaAcceptances'] + if (item__accessToken == accessToken): + print(f'Found a matching access token. Now taking property tokenInfo.eulaAcceptances: {item__tokenInfo__eulaAcceptances}') + ret_eulaAcceptance_Str = str(item__tokenInfo__eulaAcceptances) + row_counter = row_counter + 1 + + + except Exception: + sysErrorData = str(sys.exc_info()) + error_msg = f'Error getting eula Acceptances from DynamoDB. Sys Error Info: {sysErrorData}' + print(error_msg) + ret_eulaAcceptance_Str = '' + + return ret_eulaAcceptance_Str + + +# Function to convert a string like this: "Vendor One, Vendor Two" into an array like this: ["vendorone", "vendortwo"] +def convert_vendor_string_into_vendor_list(input_string=""): + ret_array = [] + + # Validation - Check if the input string is empty; if so, return an empty list, which will fail the access checkpoint later.
+ if not input_string.strip(): + return [] + + # Split the string by comma, remove internal spaces, strip extra spaces, and convert each element to lowercase + # ret_array = [element.replace(" ", "").strip().lower() for element in input_string.split(',') if element.strip()] + + # Split the string by comma and iterate over each element + for element in input_string.split(','): + # Check if the element is not just whitespace + if element.strip(): + # Remove internal spaces, strip trailing spaces, convert to lowercase + cleaned_element = element.replace(" ", "").strip().lower() + ret_array.append(cleaned_element) + + return ret_array + + +# Convert the user-requested path into a dataset directory name. +# +# # Convert this: +# "/csdap-cumulus-uat-protected/WV02_Pan_L1B___1/2009/364/WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005/WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005.ntf" +# # To This: +# "WV02_Pan_L1B___1" +# +# At this time, the parent dataset directory is always the second directory. +# # For MAXAR, there are multiple dataset directories at this level -- Example Items look like this: "WV02_Pan_L1B___1" and "GE01_MSI_L1B___1" +# # For Planet, there is a vendor-level directory at this level, so it is a bit simpler -- Example Item looks like this: "planet" +def convert_requested_path_to_dataset_dir_name(input_path=""): + # Default return to blank string. + ret_dataset_dir_name = "" + + # Validation - Check to see if the input was an empty string. If it is, then return another empty string (which will later fail the access checkpoint.) + if (input_path == ""): + return "" + + # Split the input path by a forward slash + path_parts = input_path.split('/') + print(f'Split path_parts: {path_parts}') # Example: ['', 'csdap-cumulus-uat-protected', 'WV02_Pan_L1B___1', '2009', '364', 'WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005', 'WV02_20091230151804_10300100032C7800_09DEC30151804-P1BS-501603505040_01_P005.ntf'] + + # Get the element at position 2 (third item in the directory path) -- remember, the first item is a blank string. + ret_dataset_dir_name = path_parts[2] # input_path.split('/')[2] + + # Return the result + return ret_dataset_dir_name + + +# Check the parts of the 'vendor_to_dataset_map' (from settings) that are in the 'vendor_access_list' (from the user) to see if this 'dataset_dir_name' (from the user) has access or not. +def does_dataset_dir_have_vendor_access(dataset_dir_name="", vendor_access_list=[], + vendor_to_dataset_map={}): + # Always default access to False + ret_has_access = False + + # Validation Section - If any of the inputs are blank, then deny access right here. + # + # If a blank string made its way in here, deny the access (return False) + if (dataset_dir_name == ""): + print(f'has_access: input: dataset_dir_name was a blank string. ""') + return False + # + if (vendor_access_list == []): + print(f'has_access: input: vendor_access_list was an empty list [].') + return False + # + # If the configuration is broken somehow (blank object) // "if not vendor_to_dataset_map:" is True if vendor_to_dataset_map is ONLY defined by {} (blank object) + if not vendor_to_dataset_map: + print("has_access: input: vendor_to_dataset_map was an empty object {}.") + return False + + # Get the keys to the vendor to dataset map + vendor_to_dataset_map__keys = vendor_to_dataset_map.keys() + print(f'has_access: vendor_to_dataset_map__keys {vendor_to_dataset_map__keys}') + + # Now use the vendor_access_list to check parts of the map and see if any of the directory paths match. + for vendor_access_item in vendor_access_list: + + # Check the map, but ONLY if the current vendor name is in the keys of the map + # # This part is critical; this is how we will NOT give access to a directory that is not included in the vendor access list, which comes directly from the EULA Acceptance String + if (vendor_access_item in vendor_to_dataset_map__keys): + current_vendor_dirs_to_check = vendor_to_dataset_map[vendor_access_item] + + # Now iterate all the directories found in the part of the vendor map that is for this current vendor, which was in the vendor access list. + for current_vendor_dir in current_vendor_dirs_to_check: + if (current_vendor_dir == dataset_dir_name): + ret_has_access = True + print(f'has_access: Found a match: {current_vendor_dir} in the map matches the requested directory: {dataset_dir_name}') + else: + # The only condition when this block should get hit is if there is a vendor listed on the user's EULA Acceptance List that does not currently exist in the vendor_to_dataset_map. It might be a new vendor, an error, or a misspelled word. + print(f'has_access: Warning: Vendor {vendor_access_item} was passed in but not found in the (vendor_to_dataset_map__keys): {vendor_to_dataset_map__keys}. Is this a new vendor that needs to be added? This might be an error; check the spelling of the vendor as it appears on the EULA Acceptance string') + + # Return the decision. + return ret_has_access + +# END OF LAMBDA

diff --git a/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/zip_lambda.sh b/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/zip_lambda.sh new file mode 100644 index 0000000..5681eee --- /dev/null +++ b/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/zip_lambda.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# Shell script to zip the lambda function to prepare for deployment +# Before zipping, we must replace a few environment-specific references within the +# Python code so that this will work in all of our deployment environments.
+ +# Spacer to make the terminal output easier to follow +echo "" + +# This should extract TS_ENV from the current .env file +# DOTENV should be available to this script when called from inside the Makefile +TSENV_VALUE=$(grep "^TS_ENV=" "$DOTENV" | cut -d '=' -f 2) # Examples: kris-sbx7894 or uat or prod +CUMULUS_PREFIX_VAR="cumulus-$TSENV_VALUE" +echo "zip_lambda.sh: CUMULUS_PREFIX_VAR is: $CUMULUS_PREFIX_VAR" + +# Current Execution Path +CURRENT_DIR=$(pwd) +#echo "zip_lambda.sh: Current execution path: $CURRENT_DIR" + +# Get the full path to the directory where the lambda is located +FULL_PREFIX_PATH="$CURRENT_DIR/app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints" +#echo "zip_lambda.sh: FULL_PREFIX_PATH: $FULL_PREFIX_PATH" + +# File Path Variables +LAMBDA_FILE_ORIGINAL="$FULL_PREFIX_PATH/src/lambda_function.py" +LAMBDA_FILE="$FULL_PREFIX_PATH/distro/lambda_function.py" +ZIP_FILE="$FULL_PREFIX_PATH/distro/lambda.zip" + +# First, replace some of the code with the correct prefix +#CUMULUS_PREFIX=$1 # CUMULUS_PREFIX should already be an environment variable +STRING_TO_REPLACE="ENV_VAR__CUMULUS_PREFIX" + +echo "zip_lambda.sh: About to replace occurrences of $STRING_TO_REPLACE with $CUMULUS_PREFIX_VAR in file: $LAMBDA_FILE" + +# Actually do the replacement (awk reads the src copy and writes the distro copy directly, so no separate cp is needed) +awk -v old="$STRING_TO_REPLACE" -v new="$CUMULUS_PREFIX_VAR" '{gsub(old, new); print}' "$LAMBDA_FILE_ORIGINAL" > "$LAMBDA_FILE" +echo "zip_lambda.sh: Done preparing the correct python file" + +# Output to the Terminal +echo "zip_lambda.sh: About to zip $LAMBDA_FILE" + +# Zip the Lambda function +zip -j "$ZIP_FILE" "$LAMBDA_FILE" + +# Output to the Terminal +echo "zip_lambda.sh: Completed. Zipped Lambda to $ZIP_FILE" +echo "" +

diff --git a/app/stacks/post-deploy-mods/tfvars/base.tfvars b/app/stacks/post-deploy-mods/tfvars/base.tfvars new file mode 100644 index 0000000..3531d50 --- /dev/null +++ b/app/stacks/post-deploy-mods/tfvars/base.tfvars @@ -0,0 +1,13 @@ +# NOTE: The following line is commented out only to avoid Terraform syntax +# warnings in editors that recognize Terraform files. Although it is commented +# out, Terraspace still recognizes the dependency. +# This ensures that the resources to be modified will exist before attempting to modify them! +# +# During a sandbox deploy, this directive appeared not to work on its own. +# Leaving it here to match the pattern used by the other modules. +# Also, a new config/stacks.rb file was created to define the order of the modules. +# +# Note: this line below must be in this .tfvars file; putting it inside the main.tf file does not work. +# # Adding stacks.rb files to the modules does not work either. +# +#<% depends_on("cumulus") %>

diff --git a/bin/terraform-doctor.sh b/bin/terraform-doctor.sh index 1a61022..8d6a3c8 100755 --- a/bin/terraform-doctor.sh +++ b/bin/terraform-doctor.sh @@ -23,7 +23,7 @@ set -eou pipefail _module=${1:-} if [[ -z "${_module}" ]]; then - echo "Usage: ${0} [cumulus | data-persistence | rds-cluster]" 2>&1 + echo "Usage: ${0} [cumulus | data-persistence | rds-cluster | post-deploy-mods]" 2>&1 echo "" 2>&1 echo "ERROR: No module specified." 2>&1 exit 1

diff --git a/config/stacks.rb b/config/stacks.rb new file mode 100644 index 0000000..c179b1b --- /dev/null +++ b/config/stacks.rb @@ -0,0 +1,18 @@ +# config/stacks.rb + +# To ensure the stacks are stood up in the correct/expected order.
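+# Resulting deploy order: rds-cluster -> data-persistence -> cumulus -> post-deploy-mods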
+ +stack "rds-cluster" + +stack "data-persistence" do + depends_on "rds-cluster" +end + +stack "cumulus" do + depends_on "data-persistence" +end + +stack "post-deploy-mods" do + depends_on "cumulus" +end +

diff --git a/docs/OPERATING.md b/docs/OPERATING.md index 1ffc907..2fef5bd 100644 --- a/docs/OPERATING.md +++ b/docs/OPERATING.md @@ -15,6 +15,10 @@ - [Performing Discovery without Ingestion](#performing-discovery-without-ingestion) - [Updating CMR Metadata (Self Discovery)](#updating-cmr-metadata-self-discovery) - [Destroying a Deployment](#destroying-a-deployment) +- [Cognito Integration](#cognito-integration) + - [Updating Vendor Based Data Access Filter](#updating-vendor-based-data-access-filter) + + ## Update Launchpad Certificate @@ -777,7 +781,7 @@ make nuke ``` After confirmation (you will have an opportunity to abort), this will destroy -all resources managed by Terraform in all of the modules (`cumulus`, then +all resources managed by Terraform in all of the modules (`post-deploy-mods`, then `cumulus`, then `data-persistence`, and finally `rds-cluster`), so it will take quite a bit of time to complete. @@ -789,6 +793,65 @@ you must manually finish any cleanup effort. **NOTE:** If you encounter any errors during destruction, refer to the [troubleshooting guide](./TROUBLESHOOTING.md). +## Cognito Integration +The Cognito Integration path through Cumulus for a user to get data: +- A request is made for data, which hits the lambda endpoint named `cumulus-<prefix>-DistributionApiEndpoints` +- If the request is for an S3 file, it gets forwarded to the ESDIS Cognito system for user login +- After the user has a successful login, the request comes back to `cumulus-<prefix>-DistributionApiEndpoints`, where the file is served. +- Note: There is a custom layer in `pre-filter-DistributionApiEndpoints` where a user's vendor access variable is checked before the file access is allowed or denied. + +Related AWS Components and Configuration +- API Gateway connected to Lambda +- Lambda `cumulus-<prefix>-DistributionApiEndpoints` +- Lambda `cumulus-<prefix>-pre-filter-DistributionApiEndpoints` +- CloudFront Distribution Configuration + - Under Origins, there should be a path that points to an origin domain that begins with `s3-` + - This part of the config happens on the ESDIS side. + +Vendor Based Access to Datasets +- CSDA Admin Staff are able to configure which user has access to which vendor (this process may even be automated in another system) +- On the ESDIS side, the custom property containing the list of allowed vendors is attached to the user upon authentication +- On this side, within `pre-filter-DistributionApiEndpoints`, the request is checked against the access list, and the request is either denied or allowed to proceed. +- Each vendor has its own set of subdirectories which correspond to datasets. +- The `pre-filter-DistributionApiEndpoints` lambda is packaged up and sent to AWS via the 4th module, named `post-deploy-mods` + - Note: The deployment in Terraspace was configured so that the deployment of `post-deploy-mods` happens AFTER the `cumulus` module. If this deployment happens before the `cumulus` module, the vendor filter function will not be located in its proper place at the back end of the API Gateway for Distribution Endpoints. + + +### Updating Vendor Based Data Access Filter + +Directly in the Code: +- Note: It is better to update the local copy of the code rather than the AWS-deployed version. See the section below.
+- Log in to the AWS Dashboard (testing only works in UAT or PROD, since there is no sandbox set up with ESDIS Cognito authentication) +- Browse to the Lambda functions on AWS. + - Search for "pre-filter-DistributionApiEndpoints" + - Search URL: https://us-west-2.console.aws.amazon.com/lambda/home?region=us-west-2#/functions?fo=and&o0=%3A&v0=pre-filter-DistributionApiEndpoints +- Open the Lambda function and view. + - On UAT this function is called "cumulus-uat-pre-filter-DistributionApiEndpoints" // https://us-west-2.console.aws.amazon.com/lambda/home?region=us-west-2#/functions/cumulus-uat-pre-filter-DistributionApiEndpoints?tab=code + - On PROD this function is called "cumulus-prod-pre-filter-DistributionApiEndpoints" // https://us-west-2.console.aws.amazon.com/lambda/home?region=us-west-2#/functions/cumulus-prod-pre-filter-DistributionApiEndpoints?tab=code +- Edit the code by following the steps under "Updating the Code definition to add a new vendor" +- When code updates are completed, make sure to save (ctrl+s) and then click on Deploy + +Updating the Code definition to add a new vendor +- Edit the local copy of the code (found in `app/stacks/post-deploy-mods/resources/lambdas/pre-filter-DistributionApiEndpoints/src/lambda_function.py`) + - Look for a variable named: `vendor_to_dataset_map` +This is a python dictionary. The top-level keys are vendor names. +At the time of this writing, the current vendor names are: `planet` and `maxar`. +The value after each of these vendor names is an array which lists the top-level directories where that vendor's datasets are found. +For example, under the vendor `maxar` we have multiple datasets which have S3 directory names of: `['WV04_MSI_L1B___1', 'WV04_Pan_L1B___1','WV03_MSI_L1B___1', 'WV03_Pan_L1B___1','WV02_MSI_L1B___1', 'WV02_Pan_L1B___1','WV01_MSI_L1B___1', 'WV01_Pan_L1B___1','GE01_MSI_L1B___1', 'GE01_Pan_L1B___1']` + - To add a new vendor (`testvendor`), + - create a new top-level key such as `testvendor` + - next, add an array containing all of the subdirectories for that vendor. + - If there is only 1 to add, it will be a single-element string array. +Example: `'testvendor': ['tv01_data']` // Adding a vendor called `testvendor` with a single dataset directory called `tv01_data`. + - Important Note: vendor names should be lowercase, with no spaces or non-alphanumeric characters, and the first character should not be a number. Not following this note may lead to errors for users attempting to download data. + + + [Cumulus API]: https://nasa.github.io/cumulus-api/ [Cumulus CLI]:

diff --git a/docs/TROUBLESHOOTING.md b/docs/TROUBLESHOOTING.md index 4a049a7..ed72cbf 100644 --- a/docs/TROUBLESHOOTING.md +++ b/docs/TROUBLESHOOTING.md @@ -10,6 +10,7 @@ - [Execution Role Does Not Have Permissions](#execution-role-does-not-have-permissions) - [Missing Map Element: AutoscalingGroupName](#missing-map-element-autoscalinggroupname) - [Duplicate Resources](#duplicate-resources) + - [Possible Local System Problems](#possible-local-system-problems) - [Destroying a Deployment](#destroying-a-deployment) - [Error Reading Secrets Manager Secret Policy](#error-reading-secrets-manager-secret-policy) - [Instance Cannot be Destroyed (Resource has `lifecycle.prevent_destroy` set)](#instance-cannot-be-destroyed-resource-has-lifecycleprevent_destroy-set) @@ -273,6 +274,24 @@ as each `terraform import` command can import only one resource at a time, and each time it must lock the state file, import the resource, and release the lock.
+### Possible Local System Problems + +If you are running other Docker containers, you might encounter nonspecific +errors related to your local system resources. +Try the following: +1. Restart your local machine and try again. +1. Close as many programs and browser tabs as possible and try again. +1. Clean your Docker environment + +Here are some reference commands for Docker: +``` +make docker # Creates the Docker image +docker image ls -a # Lists the Docker images on your system +docker ps -a # Shows Docker processes +docker system prune -a # Prunes (cleans) parts of Docker +# Or use the Docker Desktop UI to pause and/or remove images and containers as needed. +``` + ## Destroying a Deployment ### Error Reading Secrets Manager Secret Policy

diff --git a/scripts/src/terraform-doctor.ts b/scripts/src/terraform-doctor.ts index 82aa184..04538b5 100644 --- a/scripts/src/terraform-doctor.ts +++ b/scripts/src/terraform-doctor.ts @@ -11,7 +11,7 @@ * * where: * - * - MODULE is one of: cumulus, data-persistence, rds-cluster + * - MODULE is one of: cumulus, data-persistence, rds-cluster, post-deploy-mods * - PRESCRIPTION is the list of commands output by terraform-doctor. After * changing directory to the corresponding module path, run every command * output by terraform-doctor before changing directory back to `/work`.