From 6a0999bc1eea9caf01a718bc19207e0ae85875d5 Mon Sep 17 00:00:00 2001 From: mattBrzezinski Date: Sat, 7 Sep 2024 06:15:01 +0000 Subject: [PATCH] AWS API Definitions Updated --- src/AWSServices.jl | 9 +- src/services/acm.jl | 66 +- src/services/acm_pca.jl | 19 +- src/services/amplify.jl | 2 + src/services/appconfig.jl | 94 +- src/services/appintegrations.jl | 131 +- src/services/application_auto_scaling.jl | 326 ++-- src/services/application_signals.jl | 117 +- src/services/appstream.jl | 239 ++- src/services/arc_zonal_shift.jl | 146 +- src/services/auto_scaling.jl | 20 +- src/services/backup.jl | 443 +++--- src/services/batch.jl | 19 +- src/services/bedrock.jl | 775 +++++++++- src/services/bedrock_agent.jl | 992 +++++++++++- src/services/bedrock_agent_runtime.jl | 180 ++- src/services/bedrock_runtime.jl | 134 +- src/services/chatbot.jl | 230 +-- src/services/chime_sdk_media_pipelines.jl | 31 +- src/services/cleanrooms.jl | 838 ++++++++++- src/services/cloudfront.jl | 9 +- src/services/cloudhsm_v2.jl | 189 ++- src/services/cloudwatch_logs.jl | 151 +- src/services/codebuild.jl | 49 +- src/services/codepipeline.jl | 141 +- src/services/codestar.jl | 701 --------- src/services/cognito_identity_provider.jl | 253 ++-- src/services/compute_optimizer.jl | 6 +- src/services/connect.jl | 597 ++++++-- src/services/connect_contact_lens.jl | 2 +- src/services/controlcatalog.jl | 77 + src/services/controltower.jl | 87 +- src/services/cost_optimization_hub.jl | 16 +- src/services/datazone.jl | 1336 ++++++++++++++++- src/services/deadline.jl | 118 +- src/services/device_farm.jl | 18 +- src/services/direct_connect.jl | 58 +- src/services/docdb.jl | 72 + src/services/dynamodb.jl | 29 +- src/services/ec2.jl | 583 +++++-- src/services/ecr.jl | 290 +++- src/services/ecs.jl | 163 +- src/services/eks.jl | 9 + src/services/elastic_load_balancing_v2.jl | 167 ++- src/services/elasticache.jl | 603 ++++---- src/services/entityresolution.jl | 40 +- src/services/finspace.jl | 3 + src/services/firehose.jl | 4 + src/services/fis.jl | 64 + src/services/gamelift.jl | 52 +- src/services/glue.jl | 254 ++++ src/services/iam.jl | 31 +- src/services/internetmonitor.jl | 11 +- src/services/iotsitewise.jl | 134 +- src/services/ivs_realtime.jl | 149 +- src/services/kinesis_analytics_v2.jl | 99 +- src/services/kinesis_video_webrtc_storage.jl | 88 +- src/services/lambda.jl | 274 +++- src/services/lex_models_v2.jl | 6 +- .../license_manager_linux_subscriptions.jl | 315 +++- src/services/mediaconnect.jl | 37 + src/services/medialive.jl | 3 + src/services/medical_imaging.jl | 11 + src/services/memorydb.jl | 10 +- src/services/mobile.jl | 299 ---- src/services/mq.jl | 63 +- src/services/network_firewall.jl | 26 +- src/services/omics.jl | 20 +- src/services/opensearch.jl | 2 + src/services/organizations.jl | 35 +- src/services/payment_cryptography_data.jl | 59 +- src/services/pcs.jl | 991 ++++++++++++ src/services/personalize.jl | 111 +- src/services/pi.jl | 3 +- src/services/pinpoint_sms_voice_v2.jl | 144 +- src/services/qapps.jl | 1242 +++++++++++++++ src/services/qbusiness.jl | 37 +- src/services/qconnect.jl | 205 +++ src/services/quicksight.jl | 120 ++ src/services/rds.jl | 122 +- src/services/redshift_data.jl | 56 +- src/services/redshift_serverless.jl | 12 +- src/services/rekognition.jl | 6 +- src/services/resiliencehub.jl | 217 ++- src/services/rolesanywhere.jl | 4 + src/services/s3.jl | 589 ++++---- src/services/s3_control.jl | 102 +- src/services/sagemaker.jl | 333 +++- src/services/secrets_manager.jl | 33 +- 
src/services/securityhub.jl | 55 +- src/services/ses.jl | 27 +- src/services/sesv2.jl | 24 +- src/services/sfn.jl | 80 +- src/services/ssm.jl | 73 +- src/services/ssm_quicksetup.jl | 423 ++++++ src/services/supplychain.jl | 8 +- src/services/timestream_influxdb.jl | 3 + src/services/timestream_query.jl | 4 +- src/services/tnb.jl | 38 +- src/services/wafv2.jl | 15 +- src/services/workspaces.jl | 403 ++++- 101 files changed, 14285 insertions(+), 3819 deletions(-) delete mode 100644 src/services/codestar.jl delete mode 100644 src/services/mobile.jl create mode 100644 src/services/pcs.jl create mode 100644 src/services/qapps.jl create mode 100644 src/services/ssm_quicksetup.jl diff --git a/src/AWSServices.jl b/src/AWSServices.jl index 259e8f8066..148641b3fe 100644 --- a/src/AWSServices.jl +++ b/src/AWSServices.jl @@ -178,9 +178,6 @@ const codeguruprofiler = AWS.RestJSONService( const codepipeline = AWS.JSONService( "codepipeline", "codepipeline", "2015-07-09", "1.1", "CodePipeline_20150709" ) -const codestar = AWS.JSONService( - "codestar", "codestar", "2017-04-19", "1.1", "CodeStar_20170419" -) const codestar_connections = AWS.JSONService( "codestar-connections", "codestar-connections", @@ -558,7 +555,6 @@ const migrationhuborchestrator = AWS.RestJSONService( const migrationhubstrategy = AWS.RestJSONService( "migrationhub-strategy", "migrationhub-strategy", "2020-02-19" ) -const mobile = AWS.RestJSONService("AWSMobileHubService", "mobile", "2017-07-01") const mobile_analytics = AWS.RestJSONService( "mobileanalytics", "mobileanalytics", "2014-06-05" ) @@ -613,6 +609,9 @@ const pca_connector_ad = AWS.RestJSONService( const pca_connector_scep = AWS.RestJSONService( "pca-connector-scep", "pca-connector-scep", "2018-05-10" ) +const pcs = AWS.JSONService( + "pcs", "pcs", "2023-02-10", "1.0", "AWSParallelComputingService" +) const personalize = AWS.JSONService( "personalize", "personalize", "2018-05-22", "1.1", "AmazonPersonalize" ) @@ -640,6 +639,7 @@ const privatenetworks = AWS.RestJSONService( "private-networks", "private-networks", "2021-12-03" ) const proton = AWS.JSONService("proton", "proton", "2020-07-20", "1.0", "AwsProton20200720") +const qapps = AWS.RestJSONService("qapps", "data.qapps", "2023-11-27") const qbusiness = AWS.RestJSONService("qbusiness", "qbusiness", "2023-11-27") const qconnect = AWS.RestJSONService("wisdom", "wisdom", "2020-10-19") const qldb = AWS.RestJSONService("qldb", "qldb", "2019-01-02") @@ -772,6 +772,7 @@ const ssm_contacts = AWS.JSONService( "ssm-contacts", "ssm-contacts", "2021-05-03", "1.1", "SSMContacts" ) const ssm_incidents = AWS.RestJSONService("ssm-incidents", "ssm-incidents", "2018-05-10") +const ssm_quicksetup = AWS.RestJSONService("ssm-quicksetup", "ssm-quicksetup", "2018-05-10") const ssm_sap = AWS.RestJSONService("ssm-sap", "ssm-sap", "2018-05-10") const sso = AWS.RestJSONService("awsssoportal", "portal.sso", "2019-06-10") const sso_admin = AWS.JSONService("sso", "sso", "2020-07-20", "1.1", "SWBExternalService") diff --git a/src/services/acm.jl b/src/services/acm.jl index 1e05ab2315..68d4b6e640 100644 --- a/src/services/acm.jl +++ b/src/services/acm.jl @@ -223,10 +223,12 @@ end get_certificate(certificate_arn) get_certificate(certificate_arn, params::Dict{String,<:Any}) -Retrieves an Amazon-issued certificate and its certificate chain. The chain consists of the -certificate of the issuing CA and the intermediate certificates of any other subordinate -CAs. All of the certificates are base64 encoded. 
You can use OpenSSL to decode the -certificates and inspect individual fields. +Retrieves a certificate and its certificate chain. The certificate may be either a public +or private certificate issued using the ACM RequestCertificate action, or a certificate +imported into ACM using the ImportCertificate action. The chain consists of the certificate +of the issuing CA and the intermediate certificates of any other subordinate CAs. All of +the certificates are base64 encoded. You can use OpenSSL to decode the certificates and +inspect individual fields. # Arguments - `certificate_arn`: String that contains a certificate ARN in the following format: @@ -271,23 +273,21 @@ ACM does not provide managed renewal for certificates that you import. Note the guidelines when importing third party certificates: You must enter the private key that matches the certificate you are importing. The private key must be unencrypted. You cannot import a private key that is protected by a password or a passphrase. The private -key must be no larger than 5 KB (5,120 bytes). If the certificate you are importing is -not self-signed, you must enter its certificate chain. If a certificate chain is -included, the issuer must be the subject of one of the certificates in the chain. The -certificate, private key, and certificate chain must be PEM-encoded. The current time -must be between the Not Before and Not After certificate fields. The Issuer field must -not be empty. The OCSP authority URL, if present, must not exceed 1000 characters. To -import a new certificate, omit the CertificateArn argument. Include this argument only when -you want to replace a previously imported certificate. When you import a certificate by -using the CLI, you must specify the certificate, the certificate chain, and the private key -by their file names preceded by fileb://. For example, you can specify a certificate saved -in the C:temp folder as fileb://C:tempcertificate_to_import.pem. If you are making an HTTP -or HTTPS Query request, include these arguments as BLOBs. When you import a certificate -by using an SDK, you must specify the certificate, the certificate chain, and the private -key files in the manner required by the programming language you're using. The -cryptographic algorithm of an imported certificate must match the algorithm of the signing -CA. For example, if the signing CA key type is RSA, then the certificate key type must also -be RSA. This operation returns the Amazon Resource Name (ARN) of the imported certificate. +key must be no larger than 5 KB (5,120 bytes). The certificate, private key, and +certificate chain must be PEM-encoded. The current time must be between the Not Before +and Not After certificate fields. The Issuer field must not be empty. The OCSP +authority URL, if present, must not exceed 1000 characters. To import a new certificate, +omit the CertificateArn argument. Include this argument only when you want to replace a +previously imported certificate. When you import a certificate by using the CLI, you must +specify the certificate, the certificate chain, and the private key by their file names +preceded by fileb://. For example, you can specify a certificate saved in the C:temp folder +as fileb://C:tempcertificate_to_import.pem. If you are making an HTTP or HTTPS Query +request, include these arguments as BLOBs. 
When you import a certificate by using an +SDK, you must specify the certificate, the certificate chain, and the private key files in +the manner required by the programming language you're using. The cryptographic +algorithm of an imported certificate must match the algorithm of the signing CA. For +example, if the signing CA key type is RSA, then the certificate key type must also be RSA. + This operation returns the Amazon Resource Name (ARN) of the imported certificate. # Arguments - `certificate`: The certificate to import. @@ -335,10 +335,12 @@ end list_certificates() list_certificates(params::Dict{String,<:Any}) -Retrieves a list of certificate ARNs and domain names. You can request that only -certificates that match a specific status be listed. You can also filter by specific -attributes of the certificate. Default filtering returns only RSA_2048 certificates. For -more information, see Filters. +Retrieves a list of certificate ARNs and domain names. By default, the API returns RSA_2048 +certificates. To return all certificates in the account, include the keyType filter with +the values [RSA_1024, RSA_2048, RSA_3072, RSA_4096, EC_prime256v1, EC_secp384r1, +EC_secp521r1]. In addition to keyType, you can also filter by the CertificateStatuses, +keyUsage, and extendedKeyUsage attributes on the certificate. For more information, see +Filters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -592,10 +594,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys certificate uses to encrypt data. RSA is the default key algorithm for ACM certificates. Elliptic Curve Digital Signature Algorithm (ECDSA) keys are smaller, offering security comparable to RSA keys but with greater computing efficiency. However, ECDSA is not - supported by all network clients. Some AWS services may require RSA keys, or only support - ECDSA keys of a particular size, while others allow the use of either RSA and ECDSA keys to - ensure that compatibility is not broken. Check the requirements for the AWS service where - you plan to deploy your certificate. Default: RSA_2048 + supported by all network clients. Some Amazon Web Services services may require RSA keys, + or only support ECDSA keys of a particular size, while others allow the use of either RSA + and ECDSA keys to ensure that compatibility is not broken. Check the requirements for the + Amazon Web Services service where you plan to deploy your certificate. For more information + about selecting an algorithm, see Key algorithms. Algorithms supported for an ACM + certificate request include: RSA_2048 EC_prime256v1 EC_secp384r1 Other + listed algorithms are for imported certificates only. When you request a private PKI + certificate signed by a CA from Amazon Web Services Private CA, the specified signing + algorithm family (RSA or ECDSA) must match the algorithm family of the CA's secret key. + Default: RSA_2048 - `"Options"`: Currently, you can use this parameter to specify whether to add the certificate to a certificate transparency log. Certificate transparency makes it possible to detect SSL/TLS certificates that have been mistakenly or maliciously issued. diff --git a/src/services/acm_pca.jl b/src/services/acm_pca.jl index c039eea54b..3767bc9303 100644 --- a/src/services/acm_pca.jl +++ b/src/services/acm_pca.jl @@ -702,7 +702,7 @@ a Policy for Cross-Account Access. 
# Arguments
- `resource_arn`: The Amazon Resource Number (ARN) of the private CA that will have its
   policy retrieved. You can find the CA's ARN by calling the ListCertificateAuthorities
-  action.
+  action.

"""
function get_policy(ResourceArn; aws_config::AbstractAWSConfig=global_aws_config())
@@ -754,15 +754,14 @@ certificate signed by the preceding subordinate CA must come next, and so on unt
chain is built. The chain must be PEM-encoded. The maximum allowed size of a certificate
is 32 KB. The maximum allowed size of a certificate chain is 2 MB. Enforcement of
Critical Constraints Amazon Web Services Private CA allows the following
-extensions to be marked critical in the imported CA certificate or chain. Authority key
-identifier Basic constraints (must be marked critical) Certificate policies Extended
-key usage Inhibit anyPolicy Issuer alternative name Key usage Name constraints
-Policy mappings Subject alternative name Subject directory attributes Subject key
-identifier Subject information access Amazon Web Services Private CA rejects the
-following extensions when they are marked critical in an imported CA certificate or chain.
- Authority information access CRL distribution points Freshest CRL Policy constraints
- Amazon Web Services Private Certificate Authority will also reject any other extension
-marked as critical not contained on the preceding list of allowed extensions.
+extensions to be marked critical in the imported CA certificate or chain. Basic
+constraints (must be marked critical) Subject alternative names Key usage Extended
+key usage Authority key identifier Subject key identifier Issuer alternative name
+Subject directory attributes Subject information access Certificate policies Policy
+mappings Inhibit anyPolicy Amazon Web Services Private CA rejects the following
+extensions when they are marked critical in an imported CA certificate or chain. Name
+constraints Policy constraints CRL distribution points Authority information access
+Freshest CRL Any other extension

 # Arguments
 - `certificate`: The PEM-encoded certificate for a private CA. This may be a self-signed
diff --git a/src/services/amplify.jl b/src/services/amplify.jl
index a586af0907..6bd7e8337d 100644
--- a/src/services/amplify.jl
+++ b/src/services/amplify.jl
@@ -32,6 +32,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   must base64-encode the authorization credentials and provide them in the format
   user:password.
 - `"buildSpec"`: The build specification (build spec) for an Amplify app.
+- `"cacheConfig"`: The cache configuration for the Amplify app.
 - `"customHeaders"`: The custom HTTP headers for an Amplify app.
 - `"customRules"`: The custom rewrite and redirect rules for an Amplify app.
 - `"description"`: The description of the Amplify app.
@@ -1360,6 +1361,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   must base64-encode the authorization credentials and provide them in the format
   user:password.
 - `"buildSpec"`: The build specification (build spec) for an Amplify app.
+- `"cacheConfig"`: The cache configuration for the Amplify app.
 - `"customHeaders"`: The custom HTTP headers for an Amplify app.
 - `"customRules"`: The custom redirect and rewrite rules for an Amplify app.
 - `"description"`: The description for an Amplify app.
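A minimal sketch of passing the new cacheConfig key shown in the Amplify hunks above (the
create_app entry point and the AMPLIFY_MANAGED value are assumptions based on the Amplify
API, not confirmed by this patch):

    using AWS
    @service Amplify

    # Hypothetical example: create an Amplify app with a managed cache configuration.
    Amplify.create_app(
        "my-app",
        Dict("cacheConfig" => Dict("type" => "AMPLIFY_MANAGED")),
    )
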
diff --git a/src/services/appconfig.jl b/src/services/appconfig.jl
index 9cd18ee642..74e18c147e 100644
--- a/src/services/appconfig.jl
+++ b/src/services/appconfig.jl
@@ -408,12 +408,17 @@ end
     create_hosted_configuration_version(application_id, configuration_profile_id, content, content-_type)
     create_hosted_configuration_version(application_id, configuration_profile_id, content, content-_type, params::Dict{String,<:Any})

-Creates a new configuration in the AppConfig hosted configuration store.
+Creates a new configuration in the AppConfig hosted configuration store. If you're creating
+a feature flag, we recommend you familiarize yourself with the JSON schema for feature flag
+data. For more information, see Type reference for AWS.AppConfig.FeatureFlags in the
+AppConfig User Guide.

 # Arguments
 - `application_id`: The application ID.
 - `configuration_profile_id`: The configuration profile ID.
-- `content`: The content of the configuration or the configuration data.
+- `content`: The configuration data, as bytes. AppConfig accepts any type of data,
+  including text formats like JSON or TOML, or binary formats like protocol buffers or
+  compressed data.
 - `content-_type`: A standard MIME type describing the format of the configuration
   content. For more information, see Content-Type.

@@ -476,7 +481,7 @@ end
     delete_application(application_id)
     delete_application(application_id, params::Dict{String,<:Any})

-Deletes an application. Deleting an application does not delete a configuration from a host.
+Deletes an application.

 # Arguments
 - `application_id`: The ID of the application to delete.

@@ -510,14 +515,27 @@ end
     delete_configuration_profile(application_id, configuration_profile_id)
     delete_configuration_profile(application_id, configuration_profile_id, params::Dict{String,<:Any})

-Deletes a configuration profile. Deleting a configuration profile does not delete a
-configuration from a host.
+Deletes a configuration profile. To prevent users from unintentionally deleting
+actively-used configuration profiles, enable deletion protection.

 # Arguments
 - `application_id`: The application ID that includes the configuration profile you want to
   delete.
 - `configuration_profile_id`: The ID of the configuration profile you want to delete.

+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"x-amzn-deletion-protection-check"`: A parameter to configure deletion protection. If
+  enabled, deletion protection prevents a user from deleting a configuration profile if your
+  application has called either GetLatestConfiguration or GetConfiguration for the
+  configuration profile during the specified interval. This parameter supports the following
+  values: BYPASS: Instructs AppConfig to bypass the deletion protection check and delete a
+  configuration profile even if deletion protection would have otherwise prevented it.
+  APPLY: Instructs the deletion protection check to run, even if deletion protection is
+  disabled at the account level. APPLY also forces the deletion protection check to run
+  against resources created in the past hour, which are normally excluded from deletion
+  protection checks. ACCOUNT_DEFAULT: The default setting, which instructs AppConfig to
+  implement the deletion protection value specified in the UpdateAccountSettings API.
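
# Example
A minimal sketch (IDs are placeholders) of bypassing the deletion protection check,
assuming AWS.jl's usual convention of passing HTTP headers under a "headers" key in
`params`:

    using AWS
    @service AppConfig

    # Hypothetical application and profile IDs; BYPASS skips the protection check.
    AppConfig.delete_configuration_profile(
        "abc123",
        "prof456",
        Dict("headers" => Dict("x-amzn-deletion-protection-check" => "BYPASS")),
    )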
""" function delete_configuration_profile( ApplicationId, ConfigurationProfileId; aws_config::AbstractAWSConfig=global_aws_config() @@ -548,8 +566,7 @@ end delete_deployment_strategy(deployment_strategy_id) delete_deployment_strategy(deployment_strategy_id, params::Dict{String,<:Any}) -Deletes a deployment strategy. Deleting a deployment strategy does not delete a -configuration from a host. +Deletes a deployment strategy. # Arguments - `deployment_strategy_id`: The ID of the deployment strategy you want to delete. @@ -583,13 +600,27 @@ end delete_environment(application_id, environment_id) delete_environment(application_id, environment_id, params::Dict{String,<:Any}) -Deletes an environment. Deleting an environment does not delete a configuration from a host. +Deletes an environment. To prevent users from unintentionally deleting actively-used +environments, enable deletion protection. # Arguments - `application_id`: The application ID that includes the environment that you want to delete. - `environment_id`: The ID of the environment that you want to delete. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"x-amzn-deletion-protection-check"`: A parameter to configure deletion protection. If + enabled, deletion protection prevents a user from deleting an environment if your + application called either GetLatestConfiguration or in the environment during the specified + interval. This parameter supports the following values: BYPASS: Instructs AppConfig to + bypass the deletion protection check and delete a configuration profile even if deletion + protection would have otherwise prevented it. APPLY: Instructs the deletion protection + check to run, even if deletion protection is disabled at the account level. APPLY also + forces the deletion protection check to run against resources created in the past hour, + which are normally excluded from deletion protection checks. ACCOUNT_DEFAULT: The + default setting, which instructs AppConfig to implement the deletion protection value + specified in the UpdateAccountSettings API. """ function delete_environment( ApplicationId, EnvironmentId; aws_config::AbstractAWSConfig=global_aws_config() @@ -732,6 +763,26 @@ function delete_hosted_configuration_version( ) end +""" + get_account_settings() + get_account_settings(params::Dict{String,<:Any}) + +Returns information about the status of the DeletionProtection parameter. + +""" +function get_account_settings(; aws_config::AbstractAWSConfig=global_aws_config()) + return appconfig( + "GET", "/settings"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_account_settings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return appconfig( + "GET", "/settings", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ get_application(application_id) get_application(application_id, params::Dict{String,<:Any}) @@ -1629,6 +1680,33 @@ function untag_resource( ) end +""" + update_account_settings() + update_account_settings(params::Dict{String,<:Any}) + +Updates the value of the DeletionProtection parameter. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DeletionProtection"`: A parameter to configure deletion protection. 
If enabled,
+  deletion protection prevents a user from deleting a configuration profile or an environment
+  if AppConfig has called either GetLatestConfiguration or GetConfiguration for the
+  configuration profile or from the environment during the specified interval. Deletion
+  protection is disabled by default. The default interval for ProtectionPeriodInMinutes is 60.
+"""
+function update_account_settings(; aws_config::AbstractAWSConfig=global_aws_config())
+    return appconfig(
+        "PATCH", "/settings"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function update_account_settings(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return appconfig(
+        "PATCH", "/settings", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+
 """
     update_application(application_id)
     update_application(application_id, params::Dict{String,<:Any})
diff --git a/src/services/appintegrations.jl b/src/services/appintegrations.jl
index 326efb01fa..e3fbb812a2 100644
--- a/src/services/appintegrations.jl
+++ b/src/services/appintegrations.jl
@@ -8,8 +8,7 @@ using AWS.UUIDs
     create_application(application_source_config, name, namespace)
     create_application(application_source_config, name, namespace, params::Dict{String,<:Any})

-This API is in preview release and subject to change. Creates and persists an Application
-resource.
+Creates and persists an Application resource.

 # Arguments
 - `application_source_config`: The configuration for where the application should be loaded
@@ -77,17 +76,16 @@ function create_application(
 end

 """
-    create_data_integration(kms_key, name, source_uri)
-    create_data_integration(kms_key, name, source_uri, params::Dict{String,<:Any})
+    create_data_integration(kms_key, name)
+    create_data_integration(kms_key, name, params::Dict{String,<:Any})

 Creates and persists a DataIntegration resource. You cannot create a DataIntegration
 association for a DataIntegration that has been previously associated. Use a different
 DataIntegration, or recreate the DataIntegration using the CreateDataIntegration API.

 # Arguments
-- `kms_key`: The KMS key for the DataIntegration.
+- `kms_key`: The KMS key ARN for the DataIntegration.
 - `name`: The name of the DataIntegration.
-- `source_uri`: The URI of the data source.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -99,20 +97,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 - `"ObjectConfiguration"`: The configuration for what data should be pulled from the source.
 - `"ScheduleConfig"`: The name of the data and how often it should be pulled from the
   source.
+- `"SourceURI"`: The URI of the data source.
 - `"Tags"`: The tags used to organize, track, or control access for this resource. For
   example, { \"tags\": {\"key1\":\"value1\", \"key2\":\"value2\"} }.
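
# Example
A minimal sketch (the key ARN and source URI are placeholders) reflecting that SourceURI
is now passed as an optional key instead of a positional argument:

    using AWS
    @service AppIntegrations

    AppIntegrations.create_data_integration(
        "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
        "my-data-integration",
        Dict("SourceURI" => "Salesforce://AppFlow/my-flow"),  # placeholder URI
    )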
""" function create_data_integration( - KmsKey, Name, SourceURI; aws_config::AbstractAWSConfig=global_aws_config() + KmsKey, Name; aws_config::AbstractAWSConfig=global_aws_config() ) return appintegrations( "POST", "/dataIntegrations", Dict{String,Any}( - "KmsKey" => KmsKey, - "Name" => Name, - "SourceURI" => SourceURI, - "ClientToken" => string(uuid4()), + "KmsKey" => KmsKey, "Name" => Name, "ClientToken" => string(uuid4()) ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -121,7 +117,6 @@ end function create_data_integration( KmsKey, Name, - SourceURI, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -132,10 +127,7 @@ function create_data_integration( mergewith( _merge, Dict{String,Any}( - "KmsKey" => KmsKey, - "Name" => Name, - "SourceURI" => SourceURI, - "ClientToken" => string(uuid4()), + "KmsKey" => KmsKey, "Name" => Name, "ClientToken" => string(uuid4()) ), params, ), @@ -145,6 +137,55 @@ function create_data_integration( ) end +""" + create_data_integration_association(identifier) + create_data_integration_association(identifier, params::Dict{String,<:Any}) + +Creates and persists a DataIntegrationAssociation resource. + +# Arguments +- `identifier`: A unique identifier for the DataIntegration. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientAssociationMetadata"`: The mapping of metadata to be extracted from the data. +- `"ClientId"`: The identifier for the client that is associated with the DataIntegration + association. +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"DestinationURI"`: The URI of the data destination. +- `"ExecutionConfiguration"`: The configuration for how the files should be pulled from the + source. +- `"ObjectConfiguration"`: +""" +function create_data_integration_association( + Identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return appintegrations( + "POST", + "/dataIntegrations/$(Identifier)/associations", + Dict{String,Any}("ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_data_integration_association( + Identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appintegrations( + "POST", + "/dataIntegrations/$(Identifier)/associations", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ClientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_event_integration(event_bridge_bus, event_filter, name) create_event_integration(event_bridge_bus, event_filter, name, params::Dict{String,<:Any}) @@ -320,7 +361,7 @@ end get_application(application_identifier) get_application(application_identifier, params::Dict{String,<:Any}) -This API is in preview release and subject to change. Get an Application resource. +Get an Application resource. # Arguments - `application_identifier`: The Amazon Resource Name (ARN) of the Application. @@ -457,7 +498,7 @@ end list_applications() list_applications(params::Dict{String,<:Any}) -This API is in preview release and subject to change. Lists applications in the account. +Lists applications in the account. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -733,8 +774,7 @@ end update_application(application_identifier) update_application(application_identifier, params::Dict{String,<:Any}) -This API is in preview release and subject to change. Updates and persists an Application -resource. +Updates and persists an Application resource. # Arguments - `application_identifier`: The Amazon Resource Name (ARN) of the Application. @@ -814,6 +854,57 @@ function update_data_integration( ) end +""" + update_data_integration_association(data_integration_association_identifier, execution_configuration, identifier) + update_data_integration_association(data_integration_association_identifier, execution_configuration, identifier, params::Dict{String,<:Any}) + +Updates and persists a DataIntegrationAssociation resource. Updating a +DataIntegrationAssociation with ExecutionConfiguration will rerun the on-demand job. + +# Arguments +- `data_integration_association_identifier`: A unique identifier. of the + DataIntegrationAssociation resource +- `execution_configuration`: The configuration for how the files should be pulled from the + source. +- `identifier`: A unique identifier for the DataIntegration. + +""" +function update_data_integration_association( + DataIntegrationAssociationIdentifier, + ExecutionConfiguration, + Identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appintegrations( + "PATCH", + "/dataIntegrations/$(Identifier)/associations/$(DataIntegrationAssociationIdentifier)", + Dict{String,Any}("ExecutionConfiguration" => ExecutionConfiguration); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_data_integration_association( + DataIntegrationAssociationIdentifier, + ExecutionConfiguration, + Identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appintegrations( + "PATCH", + "/dataIntegrations/$(Identifier)/associations/$(DataIntegrationAssociationIdentifier)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("ExecutionConfiguration" => ExecutionConfiguration), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_event_integration(name) update_event_integration(name, params::Dict{String,<:Any}) diff --git a/src/services/application_auto_scaling.jl b/src/services/application_auto_scaling.jl index e50633819a..c57f3b09e8 100644 --- a/src/services/application_auto_scaling.jl +++ b/src/services/application_auto_scaling.jl @@ -19,7 +19,7 @@ scaling policy in the Application Auto Scaling User Guide. - `resource_id`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -51,26 +51,28 @@ scaling policy in the Application Auto Scaling User Guide. 2e31-5. 
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. 
comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -85,8 +87,10 @@ scaling policy in the Application Auto Scaling User Guide. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -149,7 +153,7 @@ more information, see Delete a scheduled action in the Application Auto Scaling - `resource_id`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -181,26 +185,28 @@ more information, see Delete a scheduled action in the Application Auto Scaling 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. 
dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -215,8 +221,10 @@ more information, see Delete a scheduled action in the Application Auto Scaling Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `scheduled_action_name`: The name of the scheduled action. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource @@ -282,7 +290,7 @@ with it. - `resource_id`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. 
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -314,17 +322,19 @@ with it. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -349,8 +359,10 @@ with it. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -421,7 +433,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceIds"`: The identifier of the resource associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. 
@@ -453,18 +465,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you must also specify a resource ID. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -489,8 +503,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. """ function describe_scalable_targets( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -549,7 +565,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scaling activity. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. 
EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -581,20 +597,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora @@ -616,8 +634,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. 
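
# Example
A minimal sketch (cluster and service names are placeholders) using the
service/my-cluster/my-service resource ID format documented above:

    using AWS
    @service Application_Auto_Scaling

    # List scaling activities for one ECS service's desired task count.
    Application_Auto_Scaling.describe_scaling_activities(
        "ecs",
        Dict(
            "ResourceId" => "service/my-cluster/my-service",
            "ScalableDimension" => "ecs:service:DesiredCount",
        ),
    )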
""" function describe_scaling_activities( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -672,7 +692,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -704,20 +724,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. 
Available for Aurora MySQL-compatible edition and Aurora @@ -739,8 +761,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. """ function describe_scaling_policies( ServiceNamespace; aws_config::AbstractAWSConfig=global_aws_config() @@ -775,8 +799,8 @@ end Describes the Application Auto Scaling scheduled actions for the specified service namespace. You can filter the results using the ResourceId, ScalableDimension, and -ScheduledActionNames parameters. For more information, see Scheduled scaling and Managing -scheduled scaling in the Application Auto Scaling User Guide. +ScheduledActionNames parameters. For more information, see Scheduled scaling in the +Application Auto Scaling User Guide. # Arguments - `service_namespace`: The namespace of the Amazon Web Services service that provides the @@ -794,7 +818,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ResourceId"`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -826,20 +850,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `"ScalableDimension"`: The scalable dimension. This string consists of the service namespace, resource type, and scaling property. If you specify a scalable dimension, you - must also specify a resource ID. ecs:service:DesiredCount - The desired task count of an - ECS service. 
elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR + must also specify a resource ID. ecs:service:DesiredCount - The task count of an ECS + service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - Fleet. appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. - dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. - dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. - dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global + Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. + dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. + dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. + dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible edition and Aurora @@ -861,8 +887,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `"ScheduledActionNames"`: The names of the scheduled actions to describe. """ function describe_scheduled_actions( @@ -897,8 +925,8 @@ end list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) Returns all the tags on the specified Application Auto Scaling scalable target. For general -information about tags, including the format and syntax, see Tagging Amazon Web Services -resources in the Amazon Web Services General Reference. +information about tags, including the format and syntax, see Tagging your Amazon Web +Services resources in the Amazon Web Services General Reference. # Arguments - `resource_arn`: Specify the ARN of the scalable target. For example: @@ -964,7 +992,7 @@ scaling policies that were specified for the scalable target are deleted. - `resource_id`: The identifier of the resource associated with the scaling policy. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -996,26 +1024,28 @@ scaling policies that were specified for the scalable target are deleted. 2e31-5. 
Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. 
comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -1030,8 +1060,10 @@ scaling policies that were specified for the scalable target are deleted. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -1116,7 +1148,7 @@ scheduled actions that were specified for the scalable target are deleted. - `resource_id`: The identifier of the resource associated with the scheduled action. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service name. Example: - service/default/sample-webapp. Spot Fleet - The resource type is spot-fleet-request and + service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -1148,26 +1180,28 @@ scheduled actions that were specified for the scalable target are deleted. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension. This string consists of the service - namespace, resource type, and scaling property. ecs:service:DesiredCount - The desired - task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The - instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The - target capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The desired capacity - of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write - capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read - capacity for a DynamoDB global secondary index. 
dynamodb:index:WriteCapacityUnits - The - provisioned write capacity for a DynamoDB global secondary index. - rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora DB cluster. - Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible edition. - sagemaker:variant:DesiredInstanceCount - The number of EC2 instances for a SageMaker model - endpoint variant. custom-resource:ResourceType:Property - The scalable dimension for a - custom resource provided by your own application or service. + namespace, resource type, and scaling property. ecs:service:DesiredCount - The task + count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance + count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target + capacity of a Spot Fleet. appstream:fleet:DesiredCapacity - The capacity of an AppStream + 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a + DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a + DynamoDB global secondary index. dynamodb:index:WriteCapacityUnits - The provisioned + write capacity for a DynamoDB global secondary index. rds:cluster:ReadReplicaCount - The + count of Aurora Replicas in an Aurora DB cluster. Available for Aurora MySQL-compatible + edition and Aurora PostgreSQL-compatible edition. sagemaker:variant:DesiredInstanceCount + - The number of EC2 instances for a SageMaker model endpoint variant. + custom-resource:ResourceType:Property - The scalable dimension for a custom resource + provided by your own application or service. comprehend:document-classifier-endpoint:DesiredInferenceUnits - The number of inference units for an Amazon Comprehend document classification endpoint. comprehend:entity-recognizer-endpoint:DesiredInferenceUnits - The number of inference units @@ -1182,8 +1216,10 @@ scheduled actions that were specified for the scalable target are deleted. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `scheduled_action_name`: The name of the scheduled action. This name must be unique among all other scheduled actions on the specified scalable target. - `service_namespace`: The namespace of the Amazon Web Services service that provides the @@ -1205,8 +1241,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys At and cron expressions use Universal Coordinated Time (UTC) by default. The cron format consists of six fields separated by white spaces: [Minutes] [Hours] [Day_of_Month] [Month] [Day_of_Week] [Year]. For rate expressions, value is a positive integer and unit is minute - | minutes | hour | hours | day | days. For more information and examples, see Example - scheduled actions for Application Auto Scaling in the Application Auto Scaling User Guide. + | minutes | hour | hours | day | days. 
For more information, see Schedule recurring scaling + actions using cron expressions in the Application Auto Scaling User Guide. - `"StartTime"`: The date and time for this scheduled action to start, in UTC. - `"Timezone"`: Specifies the time zone used when setting a scheduled action by using an at or cron expression. If a time zone is not provided, UTC is used by default. Valid values @@ -1294,7 +1330,7 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. - `resource_id`: The identifier of the resource that is associated with the scalable target. This string consists of the resource type and unique identifier. ECS service - The resource type is service and the unique identifier is the cluster name and service - name. Example: service/default/sample-webapp. Spot Fleet - The resource type is + name. Example: service/my-cluster/my-service. Spot Fleet - The resource type is spot-fleet-request and the unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. EMR cluster - The resource type is instancegroup and the unique identifier is the cluster ID and instance group ID. @@ -1326,17 +1362,19 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. 2e31-5. Amazon ElastiCache replication group - The resource type is replication-group and the unique identifier is the replication group name. Example: replication-group/mycluster. Neptune cluster - The resource type is cluster and the unique identifier is the cluster - name. Example: cluster:mycluster. SageMaker Serverless endpoint - The resource type is + name. Example: cluster:mycluster. SageMaker serverless endpoint - The resource type is variant and the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. SageMaker inference component - The resource type is inference-component and the unique identifier is the resource ID. Example: - inference-component/my-inference-component. + inference-component/my-inference-component. Pool of WorkSpaces - The resource type is + workspacespool and the unique identifier is the pool ID. Example: + workspacespool/wspool-123456. - `scalable_dimension`: The scalable dimension associated with the scalable target. This string consists of the service namespace, resource type, and scaling property. - ecs:service:DesiredCount - The desired task count of an ECS service. + ecs:service:DesiredCount - The task count of an ECS service. elasticmapreduce:instancegroup:InstanceCount - The instance count of an EMR Instance Group. ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot Fleet. - appstream:fleet:DesiredCapacity - The desired capacity of an AppStream 2.0 fleet. + appstream:fleet:DesiredCapacity - The capacity of an AppStream 2.0 fleet. dynamodb:table:ReadCapacityUnits - The provisioned read capacity for a DynamoDB table. dynamodb:table:WriteCapacityUnits - The provisioned write capacity for a DynamoDB table. dynamodb:index:ReadCapacityUnits - The provisioned read capacity for a DynamoDB global @@ -1361,8 +1399,10 @@ even if you don't include the MinCapacity or MaxCapacity request parameters. Amazon ElastiCache replication group. neptune:cluster:ReadReplicaCount - The count of read replicas in an Amazon Neptune DB cluster. sagemaker:variant:DesiredProvisionedConcurrency - The provisioned concurrency for a - SageMaker Serverless endpoint. sagemaker:inference-component:DesiredCopyCount - The + SageMaker serverless endpoint. 
sagemaker:inference-component:DesiredCopyCount - The number of copies across an endpoint for a SageMaker inference component. + workspaces:workspacespool:DesiredUserSessions - The number of user sessions for the + WorkSpaces in the pool. - `service_namespace`: The namespace of the Amazon Web Services service that provides the resource. For a resource provided by your own application or service, use custom-resource instead. @@ -1383,20 +1423,20 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys capacity limit in response to changing demand. This property is required when registering a new scalable target. For the following resources, the minimum value allowed is 0. AppStream 2.0 fleets Aurora DB clusters ECS services EMR clusters Lambda - provisioned concurrency SageMaker endpoint variants SageMaker Serverless endpoint - provisioned concurrency Spot Fleets custom resources It's strongly recommended that - you specify a value greater than 0. A value greater than 0 means that data points are - continuously reported to CloudWatch that scaling policies can use to scale on a metric like - average CPU utilization. For all other resources, the minimum allowed value depends on the - type of resource that you are using. If you provide a value that is lower than what a - resource can accept, an error occurs. In which case, the error message will provide the - minimum value that the resource can accept. + provisioned concurrency SageMaker endpoint variants SageMaker inference components + SageMaker serverless endpoint provisioned concurrency Spot Fleets custom resources + It's strongly recommended that you specify a value greater than 0. A value greater than 0 + means that data points are continuously reported to CloudWatch that scaling policies can + use to scale on a metric like average CPU utilization. For all other resources, the minimum + allowed value depends on the type of resource that you are using. If you provide a value + that is lower than what a resource can accept, an error occurs. In which case, the error + message will provide the minimum value that the resource can accept. - `"RoleARN"`: This parameter is required for services that do not support service-linked roles (such as Amazon EMR), and it must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf. If the service supports service-linked roles, Application Auto Scaling uses a service-linked role, which - it creates if it does not yet exist. For more information, see Application Auto Scaling IAM - roles. + it creates if it does not yet exist. For more information, see How Application Auto Scaling + works with IAM. - `"SuspendedState"`: An embedded object that contains attributes and attribute values that are used to suspend and resume automatic scaling. Setting the value of an attribute to true suspends the specified scaling activities. Setting it to false (default) resumes the @@ -1405,8 +1445,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are suspended. For DynamicScalingOutSuspended, while a suspension is in effect, all scale-out activities that are triggered by a scaling policy are suspended. For ScheduledScalingSuspended, while a suspension is in effect, all scaling activities that - involve scheduled actions are suspended. For more information, see Suspending and - resuming scaling in the Application Auto Scaling User Guide. + involve scheduled actions are suspended. 
For more information, see Suspend and resume + scaling in the Application Auto Scaling User Guide. - `"Tags"`: Assigns one or more tags to the scalable target. Use this parameter to tag the scalable target when it is created. To tag an existing scalable target, use the TagResource operation. Each tag consists of a tag key and a tag value. Both the tag key and the tag @@ -1466,10 +1506,10 @@ tag key and a tag value. To edit a tag, specify an existing tag key and a new ta You can use this operation to tag an Application Auto Scaling scalable target, but you cannot tag a scaling policy or scheduled action. You can also add tags to an Application Auto Scaling scalable target while creating it (RegisterScalableTarget). For general -information about tags, including the format and syntax, see Tagging Amazon Web Services -resources in the Amazon Web Services General Reference. Use tags to control access to a -scalable target. For more information, see Tagging support for Application Auto Scaling in -the Application Auto Scaling User Guide. +information about tags, including the format and syntax, see Tagging your Amazon Web +Services resources in the Amazon Web Services General Reference. Use tags to control access +to a scalable target. For more information, see Tagging support for Application Auto +Scaling in the Application Auto Scaling User Guide. # Arguments - `resource_arn`: Identifies the Application Auto Scaling scalable target that you want to @@ -1482,7 +1522,7 @@ the Application Auto Scaling User Guide. specify an existing tag key with a different tag value, Application Auto Scaling replaces the current tag value with the specified one. For information about the rules that apply to tag keys and tag values, see User-defined tag restrictions in the Amazon Web Services - Billing and Cost Management User Guide. + Billing User Guide. """ function tag_resource(ResourceARN, Tags; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/application_signals.jl b/src/services/application_signals.jl index 00ef2fb82c..9616fa852d 100644 --- a/src/services/application_signals.jl +++ b/src/services/application_signals.jl @@ -9,9 +9,9 @@ using AWS.UUIDs batch_get_service_level_objective_budget_report(slo_ids, timestamp, params::Dict{String,<:Any}) Use this operation to retrieve one or more service level objective (SLO) budget reports. An -error budget is the amount of time in unhealthy periods that your service can accumulate -during an interval before your overall SLO budget health is breached and the SLO is -considered to be unmet. For example, an SLO with a threshold of 99.95% and a monthly +error budget is the amount of time or requests in an unhealthy state that your service can +accumulate during an interval before your overall SLO budget health is breached and the SLO +is considered to be unmet. For example, an SLO with a threshold of 99.95% and a monthly interval translates to an error budget of 21.9 minutes of downtime in a 30-day month. Budget reports include a health indicator, the attainment value, and remaining budget. For more information about SLO error budgets, see SLO concepts. 
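A minimal sketch of calling this operation (editor's illustration; the SLO ARN is
hypothetical, the timestamp format follows the epoch-seconds convention described in this
file, and the call assumes AWS.jl's usual `@service` pattern):

    using AWS, Dates
    @service Application_Signals
    # Fetch a budget report for one SLO as of the current time, as a Unix timestamp.
    Application_Signals.batch_get_service_level_objective_budget_report(
        ["arn:aws:application-signals:us-east-1:123456789012:slo/my-slo"],
        round(Int, datetime2unix(now(UTC))),
    )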
@@ -56,8 +56,8 @@ function batch_get_service_level_objective_budget_report(
 end
 
 """
-    create_service_level_objective(name, sli_config)
-    create_service_level_objective(name, sli_config, params::Dict{String,<:Any})
+    create_service_level_objective(name)
+    create_service_level_objective(name, params::Dict{String,<:Any})
 
 Creates a service level objective (SLO), which can help you ensure that your critical
 business operations are meeting customer expectations. Use SLOs to set and track specific
@@ -65,36 +65,52 @@ target levels for the reliability and availability of your applications and serv
 use service level indicators (SLIs) to calculate whether the application is performing at
 the level that you want. Create an SLO to set a target for a service or operation’s
 availability or latency. CloudWatch measures this target frequently so you can find whether it
-has been breached. When you create an SLO, you set an attainment goal for it. An
-attainment goal is the ratio of good periods that meet the threshold requirements to the
-total periods within the interval. For example, an attainment goal of 99.9% means that
-within your interval, you are targeting 99.9% of the periods to be in healthy state. After
-you have created an SLO, you can retrieve error budget reports for it. An error budget is
-the number of periods or amount of time that your service can accumulate during an interval
-before your overall SLO budget health is breached and the SLO is considered to be unmet.
-for example, an SLO with a threshold that 99.95% of requests must be completed under 2000ms
-every month translates to an error budget of 21.9 minutes of downtime per month. When you
-call this operation, Application Signals creates the
-AWSServiceRoleForCloudWatchApplicationSignals service-linked role, if it doesn't already
-exist in your account. This service- linked role has the following permissions:
-xray:GetServiceGraph logs:StartQuery logs:GetQueryResults
-cloudwatch:GetMetricData cloudwatch:ListMetrics tag:GetResources
-autoscaling:DescribeAutoScalingGroups You can easily set SLO targets for your
-applications that are discovered by Application Signals, using critical metrics such as
-latency and availability. You can also set SLOs against any CloudWatch metric or math
-expression that produces a time series. For more information about SLOs, see Service level
-objectives (SLOs).
+has been breached. The target performance quality that is defined for an SLO is the
+attainment goal. You can set SLO targets for your applications that are discovered by
+Application Signals, using critical metrics such as latency and availability. You can also
+set SLOs against any CloudWatch metric or math expression that produces a time series. When
+you create an SLO, you specify whether it is a period-based SLO or a request-based SLO.
+Each type of SLO has a different way of evaluating your application's performance against
+its attainment goal. A period-based SLO uses defined periods of time within a specified
+total time interval. For each period of time, Application Signals determines whether the
+application met its goal. The attainment rate is calculated as the number of good
+periods/number of total periods. For example, for a period-based SLO, meeting an attainment
+goal of 99.9% means that within your interval, your application must meet its performance
+goal during at least 99.9% of the time periods. A request-based SLO doesn't use
+pre-defined periods of time.
Instead, the SLO measures the number of good requests/number of
+total requests during the interval. At any time, you can find the ratio of good requests to
+total requests for the interval up to the time stamp that you specify, and measure that
+ratio against the goal set in your SLO. After you have created an SLO, you can retrieve
+error budget reports for it. An error budget is the amount of time or amount of requests
+that your application can be non-compliant with the SLO's goal, and still have your
+application meet the goal. For a period-based SLO, the error budget starts at a number
+defined by the highest number of periods that can fail to meet the threshold, while still
+meeting the overall goal. The remaining error budget decreases with every failed period
+that is recorded. The error budget within one interval can never increase. For example, an
+SLO with a threshold that requires 99.95% of requests to be completed under 2000ms every
+month translates to an error budget of 21.9 minutes of downtime per month. For a
+request-based SLO, the remaining error budget is dynamic and can increase or decrease,
+depending on the ratio of good requests to total requests. For more information about SLOs,
+see Service level objectives (SLOs). When you perform a CreateServiceLevelObjective
+operation, Application Signals creates the AWSServiceRoleForCloudWatchApplicationSignals
+service-linked role, if it doesn't already exist in your account. This service-linked role
+has the following permissions: xray:GetServiceGraph logs:StartQuery
+logs:GetQueryResults cloudwatch:GetMetricData cloudwatch:ListMetrics
+tag:GetResources autoscaling:DescribeAutoScalingGroups

# Arguments
- `name`: A name for this SLO.
-- `sli_config`: A structure that contains information about what service and what
-  performance metric that this SLO will monitor.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"Description"`: An optional description for this SLO.
-- `"Goal"`: A structure that contains the attributes that determine the goal of the SLO.
-  This includes the time period for evaluation and the attainment threshold.
+- `"Goal"`: This structure contains the attributes that determine the goal of the SLO.
+- `"RequestBasedSliConfig"`: If this SLO is a request-based SLO, this structure defines the
+  information about what performance metric this SLO will monitor. You can't specify both
+  RequestBasedSliConfig and SliConfig in the same operation.
+- `"SliConfig"`: If this SLO is a period-based SLO, this structure defines the information
+  about what performance metric this SLO will monitor. You can't specify both
+  RequestBasedSliConfig and SliConfig in the same operation.
- `"Tags"`: A list of key-value pairs to associate with the SLO. You can associate as many
 as 50 tags with an SLO. To be able to associate tags with the SLO when you create the SLO,
 you must have the cloudwatch:TagResource permission. Tags can help you organize and
@@ -102,30 +118,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 user permission to access or change only resources with certain tag values.
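# Example
A minimal sketch (editor's illustration): creating a period-based SLO by passing a
prebuilt SliConfig dictionary. The `sli_config` payload is left as a placeholder to be
filled per the SliConfig structure, and the call assumes AWS.jl's usual `@service` pattern.

    using AWS
    @service Application_Signals
    # Placeholder: populate per the SliConfig schema before calling.
    sli_config = Dict{String,Any}()
    Application_Signals.create_service_level_objective(
        "my-slo", Dict("SliConfig" => sli_config)
    )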
""" function create_service_level_objective( - Name, SliConfig; aws_config::AbstractAWSConfig=global_aws_config() + Name; aws_config::AbstractAWSConfig=global_aws_config() ) return application_signals( "POST", "/slo", - Dict{String,Any}("Name" => Name, "SliConfig" => SliConfig); + Dict{String,Any}("Name" => Name); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_service_level_objective( - Name, - SliConfig, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + Name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return application_signals( "POST", "/slo", - Dict{String,Any}( - mergewith( - _merge, Dict{String,Any}("Name" => Name, "SliConfig" => SliConfig), params - ), - ); + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Name" => Name), params)); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -168,7 +177,8 @@ Returns information about a service discovered by Application Signals. # Arguments - `end_time`: The end of the time period to retrieve information about. When used in a raw - HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your + requested start time will be rounded to the nearest hour. - `key_attributes`: Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type @@ -180,6 +190,7 @@ Returns information about a service discovered by Application Signals. Environment specifies the location where this object is hosted, or what it belongs to. - `start_time`: The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + Your requested start time will be rounded to the nearest hour. """ function get_service( @@ -256,7 +267,8 @@ services. # Arguments - `end_time`: The end of the time period to retrieve information about. When used in a raw - HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 Your + requested end time will be rounded to the nearest hour. - `key_attributes`: Use this field to specify which service you want to retrieve information for. You must specify at least the Type, Name, and Environment attributes. This is a string-to-string map. It can include the following fields. Type designates the type @@ -268,6 +280,7 @@ services. Environment specifies the location where this object is hosted, or what it belongs to. - `start_time`: The start of the time period to retrieve information about. When used in a raw HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + Your requested start time will be rounded to the nearest hour. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -325,7 +338,8 @@ are instrumented with CloudWatch RUM app monitors. # Arguments - `end_time`: The end of the time period to retrieve information about. When used in a raw - HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057 + HTTP Query API, it is formatted as be epoch time in seconds. 
For example: 1698778057 Your
+  requested end time will be rounded to the nearest hour.
- `key_attributes`: Use this field to specify which service you want to retrieve
  information for. You must specify at least the Type, Name, and Environment attributes. This
  is a string-to-string map. It can include the following fields. Type designates the type
  of object this is. ResourceType specifies the type of the resource. This field is used
  only when the value of the Type field is Resource or AWS::Resource. Name specifies the
  name of the object. This is used only if the value of the Type field is Service,
  RemoteService, or AWS::Service. Identifier identifies the resource objects of this
  resource. This is used only if the value of the Type field is Resource or AWS::Resource.
  Environment specifies the location where this object is hosted, or what it belongs to.
- `start_time`: The start of the time period to retrieve information about. When used in a
  raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057
+  Your requested start time will be rounded to the nearest hour.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -429,7 +444,8 @@ Signals. Only the operations that were invoked during the specified time range a

# Arguments
- `end_time`: The end of the time period to retrieve information about. When used in a raw
-  HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057
+  HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057 Your
+  requested end time will be rounded to the nearest hour.
- `key_attributes`: Use this field to specify which service you want to retrieve
  information for. You must specify at least the Type, Name, and Environment attributes. This
  is a string-to-string map. It can include the following fields. Type designates the type
  of object this is. ResourceType specifies the type of the resource. This field is used
  only when the value of the Type field is Resource or AWS::Resource. Name specifies the
  name of the object. This is used only if the value of the Type field is Service,
  RemoteService, or AWS::Service. Identifier identifies the resource objects of this
  resource. This is used only if the value of the Type field is Resource or AWS::Resource.
  Environment specifies the location where this object is hosted, or what it belongs to.
- `start_time`: The start of the time period to retrieve information about. When used in a
  raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057
+  Your requested start time will be rounded to the nearest hour.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -498,9 +515,11 @@ Services are discovered through Application Signals instrumentation.

# Arguments
- `end_time`: The end of the time period to retrieve information about. When used in a raw
-  HTTP Query API, it is formatted as be epoch time in seconds. For example: 1698778057
+  HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057 Your
+  requested end time will be rounded to the nearest hour.
- `start_time`: The start of the time period to retrieve information about. When used in a
  raw HTTP Query API, it is formatted as epoch time in seconds. For example: 1698778057
+  Your requested start time will be rounded to the nearest hour.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -720,7 +739,8 @@ end

    update_service_level_objective(id, params::Dict{String,<:Any})

Updates an existing service level objective (SLO). If you omit parameters, the previous
-values of those parameters are retained.
+values of those parameters are retained. You cannot change from a period-based SLO to a
+request-based SLO, or change from a request-based SLO to a period-based SLO.

# Arguments
- `id`: The Amazon Resource Name (ARN) or name of the service level objective that you want
@@ -731,8 +751,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"Description"`: An optional description for the SLO.
- `"Goal"`: A structure that contains the attributes that determine the goal of the SLO. This includes the time period for evaluation and the attainment threshold. -- `"SliConfig"`: A structure that contains information about what performance metric this - SLO will monitor. +- `"RequestBasedSliConfig"`: If this SLO is a request-based SLO, this structure defines the + information about what performance metric this SLO will monitor. You can't specify both + SliConfig and RequestBasedSliConfig in the same operation. +- `"SliConfig"`: If this SLO is a period-based SLO, this structure defines the information + about what performance metric this SLO will monitor. """ function update_service_level_objective( Id; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/appstream.jl b/src/services/appstream.jl index b14cd3d09e..aeab98d5de 100644 --- a/src/services/appstream.jl +++ b/src/services/appstream.jl @@ -749,7 +749,9 @@ applications and desktops. stream.graphics-design.2xlarge stream.graphics-design.4xlarge stream.graphics-desktop.2xlarge stream.graphics.g4dn.xlarge stream.graphics.g4dn.2xlarge stream.graphics.g4dn.4xlarge stream.graphics.g4dn.8xlarge - stream.graphics.g4dn.12xlarge stream.graphics.g4dn.16xlarge + stream.graphics.g4dn.12xlarge stream.graphics.g4dn.16xlarge stream.graphics.g5.xlarge + stream.graphics.g5.2xlarge stream.graphics.g5.4xlarge stream.graphics.g5.8xlarge + stream.graphics.g5.12xlarge stream.graphics.g5.16xlarge stream.graphics.g5.24xlarge stream.graphics-pro.4xlarge stream.graphics-pro.8xlarge stream.graphics-pro.16xlarge The following instance types are available for Elastic fleets: stream.standard.small stream.standard.medium stream.standard.large stream.standard.xlarge @@ -765,7 +767,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming - instance. Specify a value between 60 and 360000. + instance. Specify a value between 60 and 36000. - `"DisplayName"`: The fleet name to display. - `"DomainJoinInfo"`: The name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. This is not allowed for Elastic fleets. @@ -793,13 +795,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. - Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this - feature, we recommend that you specify a value that corresponds exactly to a whole number - of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to - the nearest minute. For example, if you specify a value of 70, users are disconnected after - 1 minute of inactivity. If you specify a value that is at the midpoint between two - different minutes, the value is rounded up. For example, if you specify a value of 90, - users are disconnected after 2 minutes of inactivity. + Otherwise, specify a value between 60 and 36000. The default value is 0. 
If you enable + this feature, we recommend that you specify a value that corresponds exactly to a whole + number of minutes (for example, 60, 120, and 180). If you don't do this, the value is + rounded to the nearest minute. For example, if you specify a value of 70, users are + disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint + between two different minutes, the value is rounded up. For example, if you specify a value + of 90, users are disconnected after 2 minutes of inactivity. - `"ImageArn"`: The ARN of the public, private, or shared image to use. - `"ImageName"`: The name of the image used to create the fleet. - `"MaxConcurrentSessions"`: The maximum concurrent sessions of the Elastic fleet. This is @@ -1092,6 +1094,83 @@ function create_streaming_url( ) end +""" + create_theme_for_stack(favicon_s3_location, organization_logo_s3_location, stack_name, theme_styling, title_text) + create_theme_for_stack(favicon_s3_location, organization_logo_s3_location, stack_name, theme_styling, title_text, params::Dict{String,<:Any}) + +Creates custom branding that customizes the appearance of the streaming application catalog +page. + +# Arguments +- `favicon_s3_location`: The S3 location of the favicon. The favicon enables users to + recognize their application streaming site in a browser full of tabs or bookmarks. It is + displayed at the top of the browser tab for the application streaming site during users' + streaming sessions. +- `organization_logo_s3_location`: The organization logo that appears on the streaming + application catalog page. +- `stack_name`: The name of the stack for the theme. +- `theme_styling`: The color theme that is applied to website links, text, and buttons. + These colors are also applied as accents in the background for the streaming application + catalog page. +- `title_text`: The title that is displayed at the top of the browser tab during users' + application streaming sessions. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"FooterLinks"`: The links that are displayed in the footer of the streaming application + catalog page. These links are helpful resources for users, such as the organization's IT + support and product marketing sites. 
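# Example
A minimal sketch (editor's illustration): the stack name, bucket/key values, styling
value, and title are all hypothetical, the S3Bucket/S3Key field names are assumed for the
S3 location structures, and the call assumes AWS.jl's usual `@service` pattern. Arguments
follow the positional order shown above.

    using AWS
    @service Appstream
    Appstream.create_theme_for_stack(
        Dict("S3Bucket" => "my-bucket", "S3Key" => "favicon.png"),  # favicon
        Dict("S3Bucket" => "my-bucket", "S3Key" => "logo.png"),     # organization logo
        "my-stack",                                                 # stack name
        "LIGHT_BLUE",                                               # theme styling
        "My Streaming Portal",                                      # title text
    )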
+""" +function create_theme_for_stack( + FaviconS3Location, + OrganizationLogoS3Location, + StackName, + ThemeStyling, + TitleText; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "CreateThemeForStack", + Dict{String,Any}( + "FaviconS3Location" => FaviconS3Location, + "OrganizationLogoS3Location" => OrganizationLogoS3Location, + "StackName" => StackName, + "ThemeStyling" => ThemeStyling, + "TitleText" => TitleText, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_theme_for_stack( + FaviconS3Location, + OrganizationLogoS3Location, + StackName, + ThemeStyling, + TitleText, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "CreateThemeForStack", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "FaviconS3Location" => FaviconS3Location, + "OrganizationLogoS3Location" => OrganizationLogoS3Location, + "StackName" => StackName, + "ThemeStyling" => ThemeStyling, + "TitleText" => TitleText, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_updated_image(existing_image_name, new_image_name) create_updated_image(existing_image_name, new_image_name, params::Dict{String,<:Any}) @@ -1567,6 +1646,42 @@ function delete_stack( ) end +""" + delete_theme_for_stack(stack_name) + delete_theme_for_stack(stack_name, params::Dict{String,<:Any}) + +Deletes custom branding that customizes the appearance of the streaming application catalog +page. + +# Arguments +- `stack_name`: The name of the stack for the theme. + +""" +function delete_theme_for_stack( + StackName; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DeleteThemeForStack", + Dict{String,Any}("StackName" => StackName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_theme_for_stack( + StackName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "DeleteThemeForStack", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("StackName" => StackName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_usage_report_subscription() delete_usage_report_subscription(params::Dict{String,<:Any}) @@ -2070,6 +2185,42 @@ function describe_stacks( ) end +""" + describe_theme_for_stack(stack_name) + describe_theme_for_stack(stack_name, params::Dict{String,<:Any}) + +Retrieves a list that describes the theme for a specified stack. A theme is custom branding +that customizes the appearance of the streaming application catalog page. + +# Arguments +- `stack_name`: The name of the stack for the theme. 
+ +""" +function describe_theme_for_stack( + StackName; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "DescribeThemeForStack", + Dict{String,Any}("StackName" => StackName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_theme_for_stack( + StackName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "DescribeThemeForStack", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("StackName" => StackName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_usage_report_subscriptions() describe_usage_report_subscriptions(params::Dict{String,<:Any}) @@ -3147,7 +3298,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys active after users disconnect. If users try to reconnect to the streaming session after a disconnection or network interruption within this time interval, they are connected to their previous session. Otherwise, they are connected to a new session with a new streaming - instance. Specify a value between 60 and 360000. + instance. Specify a value between 60 and 36000. - `"DisplayName"`: The fleet name to display. - `"DomainJoinInfo"`: The name of the directory and organizational unit (OU) to use to join the fleet to a Microsoft Active Directory domain. @@ -3170,13 +3321,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys out, and pixels changing do not qualify as user activity. If users continue to be idle after the time interval in IdleDisconnectTimeoutInSeconds elapses, they are disconnected. To prevent users from being disconnected due to inactivity, specify a value of 0. - Otherwise, specify a value between 60 and 3600. The default value is 0. If you enable this - feature, we recommend that you specify a value that corresponds exactly to a whole number - of minutes (for example, 60, 120, and 180). If you don't do this, the value is rounded to - the nearest minute. For example, if you specify a value of 70, users are disconnected after - 1 minute of inactivity. If you specify a value that is at the midpoint between two - different minutes, the value is rounded up. For example, if you specify a value of 90, - users are disconnected after 2 minutes of inactivity. + Otherwise, specify a value between 60 and 36000. The default value is 0. If you enable + this feature, we recommend that you specify a value that corresponds exactly to a whole + number of minutes (for example, 60, 120, and 180). If you don't do this, the value is + rounded to the nearest minute. For example, if you specify a value of 70, users are + disconnected after 1 minute of inactivity. If you specify a value that is at the midpoint + between two different minutes, the value is rounded up. For example, if you specify a value + of 90, users are disconnected after 2 minutes of inactivity. - `"ImageArn"`: The ARN of the public, private, or shared image to use. - `"ImageName"`: The name of the image used to create the fleet. - `"InstanceType"`: The instance type to use when launching fleet instances. The following @@ -3337,3 +3488,57 @@ function update_stack( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_theme_for_stack(stack_name) + update_theme_for_stack(stack_name, params::Dict{String,<:Any}) + +Updates custom branding that customizes the appearance of the streaming application catalog +page. + +# Arguments +- `stack_name`: The name of the stack for the theme. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AttributesToDelete"`: The attributes to delete. +- `"FaviconS3Location"`: The S3 location of the favicon. The favicon enables users to + recognize their application streaming site in a browser full of tabs or bookmarks. It is + displayed at the top of the browser tab for the application streaming site during users' + streaming sessions. +- `"FooterLinks"`: The links that are displayed in the footer of the streaming application + catalog page. These links are helpful resources for users, such as the organization's IT + support and product marketing sites. +- `"OrganizationLogoS3Location"`: The organization logo that appears on the streaming + application catalog page. +- `"State"`: Specifies whether custom branding should be applied to catalog page or not. +- `"ThemeStyling"`: The color theme that is applied to website links, text, and buttons. + These colors are also applied as accents in the background for the streaming application + catalog page. +- `"TitleText"`: The title that is displayed at the top of the browser tab during users' + application streaming sessions. +""" +function update_theme_for_stack( + StackName; aws_config::AbstractAWSConfig=global_aws_config() +) + return appstream( + "UpdateThemeForStack", + Dict{String,Any}("StackName" => StackName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_theme_for_stack( + StackName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return appstream( + "UpdateThemeForStack", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("StackName" => StackName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/arc_zonal_shift.jl b/src/services/arc_zonal_shift.jl index 57a6c9300b..b038bb8081 100644 --- a/src/services/arc_zonal_shift.jl +++ b/src/services/arc_zonal_shift.jl @@ -48,8 +48,11 @@ autoshift. A practice run configuration includes specifications for blocked date blocked time windows, and for Amazon CloudWatch alarms that you create to use with practice runs. The alarms that you specify are an outcome alarm, to monitor application health during practice runs and, optionally, a blocking alarm, to block practice runs from -starting. For more information, see Considerations when you configure zonal autoshift in -the Amazon Route 53 Application Recovery Controller Developer Guide. +starting. When a resource has a practice run configuration, Route 53 ARC starts zonal +shifts for the resource weekly, to shift traffic for practice runs. Practice runs help you +to ensure that shifting away traffic from an Availability Zone during an autoshift is safe +for your application. For more information, see Considerations when you configure zonal +autoshift in the Amazon Route 53 Application Recovery Controller Developer Guide. # Arguments - `outcome_alarms`: The outcome alarm for practice runs is a required Amazon CloudWatch @@ -58,10 +61,10 @@ the Amazon Route 53 Application Recovery Controller Developer Guide. from an Availability Zone during each weekly practice run. You should configure the alarm to go into an ALARM state if your application is impacted by the zonal shift, and you want to stop the zonal shift, to let traffic for the resource return to the Availability Zone. 
-- `resource_identifier`: The identifier of the resource to shift away traffic for when a - practice run starts a zonal shift. The identifier is the Amazon Resource Name (ARN) for the - resource. At this time, supported resources are Network Load Balancers and Application Load - Balancers with cross-zone load balancing turned off. +- `resource_identifier`: The identifier of the resource that Amazon Web Services shifts + traffic for with a practice run zonal shift. The identifier is the Amazon Resource Name + (ARN) for the resource. At this time, supported resources are Network Load Balancers and + Application Load Balancers with cross-zone load balancing turned off. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -158,6 +161,42 @@ function delete_practice_run_configuration( ) end +""" + get_autoshift_observer_notification_status() + get_autoshift_observer_notification_status(params::Dict{String,<:Any}) + +Returns the status of autoshift observer notification. Autoshift observer notification +enables you to be notified, through Amazon EventBridge, when there is an autoshift event +for zonal autoshift. If the status is ENABLED, Route 53 ARC includes all autoshift events +when you use the EventBridge pattern Autoshift In Progress. When the status is DISABLED, +Route 53 ARC includes only autoshift events for autoshifts when one or more of your +resources is included in the autoshift. For more information, see Notifications for +practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller +Developer Guide. + +""" +function get_autoshift_observer_notification_status(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return arc_zonal_shift( + "GET", + "/autoshift-observer-notification"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_autoshift_observer_notification_status( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return arc_zonal_shift( + "GET", + "/autoshift-observer-notification", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_managed_resource(resource_identifier) get_managed_resource(resource_identifier, params::Dict{String,<:Any}) @@ -170,10 +209,10 @@ start a zonal shift or configure zonal autoshift for Network Load Balancers and Load Balancers with cross-zone load balancing turned off. # Arguments -- `resource_identifier`: The identifier for the resource to shift away traffic for. The - identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported - resources are Network Load Balancers and Application Load Balancers with cross-zone load - balancing turned off. +- `resource_identifier`: The identifier for the resource that Amazon Web Services shifts + traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this + time, supported resources are Network Load Balancers and Application Load Balancers with + cross-zone load balancing turned off. """ function get_managed_resource( @@ -204,7 +243,9 @@ end list_autoshifts() list_autoshifts(params::Dict{String,<:Any}) -Returns the active autoshifts for a specified resource. +Returns a list of autoshifts for an Amazon Web Services Region. By default, the call +returns only ACTIVE autoshifts. Optionally, you can specify the status parameter to return +COMPLETED autoshifts. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -269,9 +310,9 @@ end Lists all active and completed zonal shifts in Amazon Route 53 Application Recovery Controller in your Amazon Web Services account in this Amazon Web Services Region. -ListZonalShifts returns customer-started zonal shifts, as well as practice run zonal shifts -that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts operation -does not list autoshifts. For more information about listing autoshifts, see +ListZonalShifts returns customer-initiated zonal shifts, as well as practice run zonal +shifts that Route 53 ARC started on your behalf for zonal autoshift. The ListZonalShifts +operation does not list autoshifts. For more information about listing autoshifts, see \">ListAutoshifts. # Optional Parameters @@ -324,9 +365,10 @@ Availability Zone to complete. For more information, see Zonal shift in the Amaz Application Recovery Controller Developer Guide. # Arguments -- `away_from`: The Availability Zone that traffic is moved away from for a resource when - you start a zonal shift. Until the zonal shift expires or you cancel it, traffic for the - resource is instead moved to other Availability Zones in the Amazon Web Services Region. +- `away_from`: The Availability Zone (for example, use1-az1) that traffic is moved away + from for a resource when you start a zonal shift. Until the zonal shift expires or you + cancel it, traffic for the resource is instead moved to other Availability Zones in the + Amazon Web Services Region. - `comment`: A comment that you enter about the zonal shift. Only the latest comment is retained; no comment history is maintained. A new comment overwrites any existing comment string. @@ -340,10 +382,10 @@ Application Recovery Controller Developer Guide. A lowercase letter m: To specify that the value is in minutes. A lowercase letter h: To specify that the value is in hours. For example: 20h means the zonal shift expires in 20 hours. 120m means the zonal shift expires in 120 minutes (2 hours). -- `resource_identifier`: The identifier for the resource to shift away traffic for. The - identifier is the Amazon Resource Name (ARN) for the resource. At this time, supported - resources are Network Load Balancers and Application Load Balancers with cross-zone load - balancing turned off. +- `resource_identifier`: The identifier for the resource that Amazon Web Services shifts + traffic for. The identifier is the Amazon Resource Name (ARN) for the resource. At this + time, supported resources are Network Load Balancers and Application Load Balancers with + cross-zone load balancing turned off. """ function start_zonal_shift( @@ -394,6 +436,50 @@ function start_zonal_shift( ) end +""" + update_autoshift_observer_notification_status(status) + update_autoshift_observer_notification_status(status, params::Dict{String,<:Any}) + +Update the status of autoshift observer notification. Autoshift observer notification +enables you to be notified, through Amazon EventBridge, when there is an autoshift event +for zonal autoshift. If the status is ENABLED, Route 53 ARC includes all autoshift events +when you use the EventBridge pattern Autoshift In Progress. When the status is DISABLED, +Route 53 ARC includes only autoshift events for autoshifts when one or more of your +resources is included in the autoshift. For more information, see Notifications for +practice runs and autoshifts in the Amazon Route 53 Application Recovery Controller +Developer Guide. 
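Before the argument details, a minimal sketch of driving this setting from AWS.jl (the
module mapping and account setup are assumptions; the status values come from the
description above):

    using AWS
    @service ARC_Zonal_Shift  # assumed to resolve to src/services/arc_zonal_shift.jl

    # Opt in to all autoshift EventBridge events, then confirm the setting.
    ARC_Zonal_Shift.update_autoshift_observer_notification_status("ENABLED")
    ARC_Zonal_Shift.get_autoshift_observer_notification_status()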
+
+# Arguments
+- `status`: The status to set for autoshift observer notification. If the status is
+  ENABLED, Route 53 ARC includes all autoshift events when you use the Amazon EventBridge
+  pattern Autoshift In Progress. When the status is DISABLED, Route 53 ARC includes only
+  autoshift events for autoshifts when one or more of your resources is included in the
+  autoshift.
+
+"""
+function update_autoshift_observer_notification_status(
+    status; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return arc_zonal_shift(
+        "PUT",
+        "/autoshift-observer-notification",
+        Dict{String,Any}("status" => status);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_autoshift_observer_notification_status(
+    status, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return arc_zonal_shift(
+        "PUT",
+        "/autoshift-observer-notification",
+        Dict{String,Any}(mergewith(_merge, Dict{String,Any}("status" => status), params));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     update_practice_run_configuration(resource_identifier)
     update_practice_run_configuration(resource_identifier, params::Dict{String,<:Any})
@@ -457,17 +543,25 @@ end
     update_zonal_autoshift_configuration(resource_identifier, zonal_autoshift_status)
     update_zonal_autoshift_configuration(resource_identifier, zonal_autoshift_status, params::Dict{String,<:Any})

-You can update the zonal autoshift status for a resource, to enable or disable zonal
-autoshift. When zonal autoshift is ENABLED, Amazon Web Services shifts away resource
-traffic from an Availability Zone, on your behalf, when Amazon Web Services determines that
-there's an issue in the Availability Zone that could potentially affect customers.
+The zonal autoshift configuration for a resource includes the practice run configuration
+and the status for running autoshifts, zonal autoshift status. When a resource has a
+practice run configuration, Route 53 ARC starts weekly zonal shifts for the resource, to
+shift traffic away from an Availability Zone. Weekly practice runs help you to make sure
+that your application can continue to operate normally with the loss of one Availability
+Zone. You can update the zonal autoshift status to enable or disable zonal
+autoshift. When zonal autoshift is ENABLED, you authorize Amazon Web Services to shift away
+resource traffic for an application from an Availability Zone during events, on your
+behalf, to help reduce time to recovery. Traffic is also shifted away for the required
+weekly practice runs.

 # Arguments
 - `resource_identifier`: The identifier for the resource that you want to update the zonal
   autoshift configuration for. The identifier is the Amazon Resource Name (ARN) for the
   resource.
 - `zonal_autoshift_status`: The zonal autoshift status for the resource that you want to
-  update the zonal autoshift configuration for.
+  update the zonal autoshift configuration for. Choose ENABLED to authorize Amazon Web
+  Services to shift away resource traffic for an application from an Availability Zone during
+  events, on your behalf, to help reduce time to recovery.

 """
 function update_zonal_autoshift_configuration(
diff --git a/src/services/auto_scaling.jl b/src/services/auto_scaling.jl
index 00c4e42fe6..65be51c868 100644
--- a/src/services/auto_scaling.jl
+++ b/src/services/auto_scaling.jl
@@ -525,10 +525,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`.
Valid keys information, see Set the health check grace period for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. Default: 0 seconds - `"HealthCheckType"`: A comma-separated value string of one or more health check types. - The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot - be disabled. For more information, see Health checks for instances in an Auto Scaling group - in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that - was previously set. + The valid values are EC2, EBS, ELB, and VPC_LATTICE. EC2 is the default health check and + cannot be disabled. For more information, see Health checks for instances in an Auto + Scaling group in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear + a value that was previously set. - `"InstanceId"`: The ID of the instance used to base the launch configuration on. If specified, Amazon EC2 Auto Scaling uses the configuration values from the specified instance to create a new launch configuration. To get the instance ID, use the Amazon EC2 @@ -3166,8 +3166,8 @@ end set_instance_health(health_status, instance_id) set_instance_health(health_status, instance_id, params::Dict{String,<:Any}) -Sets the health status of the specified instance. For more information, see Health checks -for instances in an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. +Sets the health status of the specified instance. For more information, see Set up a custom +health check for your Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. # Arguments - `health_status`: The health status of the instance. Set to Healthy to have the instance @@ -3533,10 +3533,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see Set the health check grace period for an Auto Scaling group in the Amazon EC2 Auto Scaling User Guide. - `"HealthCheckType"`: A comma-separated value string of one or more health check types. - The valid values are EC2, ELB, and VPC_LATTICE. EC2 is the default health check and cannot - be disabled. For more information, see Health checks for instances in an Auto Scaling group - in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear a value that - was previously set. + The valid values are EC2, EBS, ELB, and VPC_LATTICE. EC2 is the default health check and + cannot be disabled. For more information, see Health checks for instances in an Auto + Scaling group in the Amazon EC2 Auto Scaling User Guide. Only specify EC2 if you must clear + a value that was previously set. - `"InstanceMaintenancePolicy"`: An instance maintenance policy. For more information, see Set instance maintenance policy in the Amazon EC2 Auto Scaling User Guide. - `"LaunchConfigurationName"`: The name of the launch configuration. If you specify diff --git a/src/services/backup.jl b/src/services/backup.jl index c40b2802ec..bb69e463e3 100644 --- a/src/services/backup.jl +++ b/src/services/backup.jl @@ -8,18 +8,16 @@ using AWS.UUIDs cancel_legal_hold(cancel_description, legal_hold_id) cancel_legal_hold(cancel_description, legal_hold_id, params::Dict{String,<:Any}) -This action removes the specified legal hold on a recovery point. This action can only be -performed by a user with sufficient permissions. +Removes the specified legal hold on a recovery point. This action can only be performed by +a user with sufficient permissions. 
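A short calling sketch (both identifiers are illustrative; `retainRecordInDays` is the
optional key documented below):

    using AWS
    @service Backup

    # Release a hypothetical legal hold, keeping the record for 7 more days.
    Backup.cancel_legal_hold(
        "Hold released after audit",
        "example-legal-hold-id",
        Dict{String,Any}("retainRecordInDays" => 7),
    )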

# Arguments
-- `cancel_description`: String describing the reason for removing the legal hold.
-- `legal_hold_id`: Legal hold ID required to remove the specified legal hold on a recovery
-  point.
+- `cancel_description`: A string that describes the reason for removing the legal hold.
+- `legal_hold_id`: The ID of the legal hold.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"retainRecordInDays"`: The integer amount in days specifying amount of days after this
-  API operation to remove legal hold.
+- `"retainRecordInDays"`: The integer amount, in days, after which to remove legal hold.
 """
 function cancel_legal_hold(
     cancelDescription, legalHoldId; aws_config::AbstractAWSConfig=global_aws_config()
@@ -61,14 +59,12 @@ points for resources. If you call CreateBackupPlan with a plan that already exis
 receive an AlreadyExistsException exception.

# Arguments
-- `backup_plan`: Specifies the body of a backup plan. Includes a BackupPlanName and one or
-  more sets of Rules.
+- `backup_plan`: The body of a backup plan. Includes a BackupPlanName and one or more sets
+  of Rules.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"BackupPlanTags"`: To help organize your resources, you can assign your own metadata to
-  the resources that you create. Each tag is a key-value pair. The specified tags are
-  assigned to all backups created with this plan.
+- `"BackupPlanTags"`: The tags to assign to the backup plan.
 - `"CreatorRequestId"`: Identifies the request and allows failed requests to be retried
   without the risk of running the operation twice. If the request includes a CreatorRequestId
   that matches an existing backup plan, that plan is returned. This parameter is optional. If
@@ -107,10 +103,8 @@ Creates a JSON document that specifies a set of resources to assign to a backup
 examples, see Assigning resources programmatically.

# Arguments
-- `backup_selection`: Specifies the body of a request to assign a set of resources to a
-  backup plan.
-- `backup_plan_id`: Uniquely identifies the backup plan to be associated with the selection
-  of resources.
+- `backup_selection`: The body of a request to assign a set of resources to a backup plan.
+- `backup_plan_id`: The ID of the backup plan.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -164,8 +158,7 @@ include sensitive data, such as passport numbers, in the name of a backup vault.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"BackupVaultTags"`: Metadata that you can assign to help organize the resources that you
-  create. Each tag is a key-value pair.
+- `"BackupVaultTags"`: The tags to assign to the backup vault.
 - `"CreatorRequestId"`: A unique string that identifies the request and allows failed
   requests to be retried without the risk of running the operation twice. This parameter is
   optional. If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters.
@@ -207,8 +200,8 @@ define your policies, you can evaluate whether your backup practices comply with
 policies and which resources are not yet in compliance.

# Arguments
-- `framework_controls`: A list of the controls that make up the framework. Each control in
-  the list has a name, input parameters, and scope.
+- `framework_controls`: The controls that make up the framework.
Each control in the list + has a name, input parameters, and scope. - `framework_name`: The unique name of the framework. The name must be between 1 and 256 characters, starting with a letter, and consisting of letters (a-z, A-Z), numbers (0-9), and underscores (_). @@ -217,8 +210,7 @@ policies and which resources are not yet in compliance. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"FrameworkDescription"`: An optional description of the framework with a maximum of 1,024 characters. -- `"FrameworkTags"`: Metadata that you can assign to help organize the frameworks that you - create. Each tag is a key-value pair. +- `"FrameworkTags"`: The tags to assign to the framework. - `"IdempotencyToken"`: A customer-chosen string that you can use to distinguish between otherwise identical calls to CreateFrameworkInput. Retrying a successful request with the same idempotency token results in a success message with no action taken. @@ -267,22 +259,22 @@ end create_legal_hold(description, title) create_legal_hold(description, title, params::Dict{String,<:Any}) -This action creates a legal hold on a recovery point (backup). A legal hold is a restraint -on altering or deleting a backup until an authorized user cancels the legal hold. Any -actions to delete or disassociate a recovery point will fail with an error if one or more -active legal holds are on the recovery point. +Creates a legal hold on a recovery point (backup). A legal hold is a restraint on altering +or deleting a backup until an authorized user cancels the legal hold. Any actions to delete +or disassociate a recovery point will fail with an error if one or more active legal holds +are on the recovery point. # Arguments -- `description`: This is the string description of the legal hold. -- `title`: This is the string title of the legal hold. +- `description`: The description of the legal hold. +- `title`: The title of the legal hold. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"IdempotencyToken"`: This is a user-chosen string used to distinguish between otherwise identical calls. Retrying a successful request with the same idempotency token results in a success message with no action taken. -- `"RecoveryPointSelection"`: This specifies criteria to assign a set of resources, such as - resource types or backup vaults. +- `"RecoveryPointSelection"`: The criteria to assign a set of resources, such as resource + types or backup vaults. - `"Tags"`: Optional tags to include. A tag is a key-value pair you can use to manage, filter, and search for your resources. Allowed characters include UTF-8 letters, numbers, spaces, and the following characters: + - = . _ : /. @@ -323,34 +315,25 @@ end create_logically_air_gapped_backup_vault(max_retention_days, min_retention_days, backup_vault_name) create_logically_air_gapped_backup_vault(max_retention_days, min_retention_days, backup_vault_name, params::Dict{String,<:Any}) -This request creates a logical container to where backups may be copied. This request -includes a name, the Region, the maximum number of retention days, the minimum number of -retention days, and optionally can include tags and a creator request ID. Do not include -sensitive data, such as passport numbers, in the name of a backup vault. +Creates a logical container to where backups may be copied. 
This request includes a name, +the Region, the maximum number of retention days, the minimum number of retention days, and +optionally can include tags and a creator request ID. Do not include sensitive data, such +as passport numbers, in the name of a backup vault. # Arguments -- `max_retention_days`: This is the setting that specifies the maximum retention period - that the vault retains its recovery points. If this parameter is not specified, Backup does - not enforce a maximum retention period on the recovery points in the vault (allowing - indefinite storage). If specified, any backup or copy job to the vault must have a - lifecycle policy with a retention period equal to or shorter than the maximum retention - period. If the job retention period is longer than that maximum retention period, then the - vault fails the backup or copy job, and you should either modify your lifecycle settings or - use a different vault. +- `max_retention_days`: The maximum retention period that the vault retains its recovery + points. - `min_retention_days`: This setting specifies the minimum retention period that the vault - retains its recovery points. If this parameter is not specified, no minimum retention - period is enforced. If specified, any backup or copy job to the vault must have a lifecycle - policy with a retention period equal to or longer than the minimum retention period. If a - job retention period is shorter than that minimum retention period, then the vault fails - the backup or copy job, and you should either modify your lifecycle settings or use a - different vault. -- `backup_vault_name`: This is the name of the vault that is being created. + retains its recovery points. The minimum value accepted is 7 days. +- `backup_vault_name`: The name of a logical container where backups are stored. Logically + air-gapped backup vaults are identified by names that are unique to the account used to + create them and the Region where they are created. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"BackupVaultTags"`: These are the tags that will be included in the newly-created vault. -- `"CreatorRequestId"`: This is the ID of the creation request. This parameter is optional. - If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters. +- `"BackupVaultTags"`: The tags to assign to the vault. +- `"CreatorRequestId"`: The ID of the creation request. This parameter is optional. If + used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters. """ function create_logically_air_gapped_backup_vault( MaxRetentionDays, @@ -421,8 +404,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys same idempotency token results in a success message with no action taken. - `"ReportPlanDescription"`: An optional description of the report plan with a maximum of 1,024 characters. -- `"ReportPlanTags"`: Metadata that you can assign to help organize the report plans that - you create. Each tag is a key-value pair. +- `"ReportPlanTags"`: The tags to assign to the report plan. """ function create_report_plan( ReportDeliveryChannel, @@ -474,10 +456,8 @@ end create_restore_testing_plan(restore_testing_plan) create_restore_testing_plan(restore_testing_plan, params::Dict{String,<:Any}) -This is the first of two steps to create a restore testing plan; once this request is -successful, finish the procedure with request CreateRestoreTestingSelection. You must -include the parameter RestoreTestingPlan. 
You may optionally include CreatorRequestId and -Tags. +Creates a restore testing plan. The first of two steps to create a restore testing plan. +After this request is successful, finish the procedure using CreateRestoreTestingSelection. # Arguments - `restore_testing_plan`: A restore testing plan must contain a unique @@ -493,9 +473,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys failed requests to be retriedwithout the risk of running the operation twice. This parameter is optional. If used, this parameter must contain 1 to 50 alphanumeric or '-_.' characters. -- `"Tags"`: Optional tags to include. A tag is a key-value pair you can use to manage, - filter, and search for your resources. Allowed characters include UTF-8 letters,numbers, - spaces, and the following characters: + - = . _ : /. +- `"Tags"`: The tags to assign to the restore testing plan. """ function create_restore_testing_plan( RestoreTestingPlan; aws_config::AbstractAWSConfig=global_aws_config() @@ -671,8 +649,7 @@ Deletes the backup vault identified by its name. A vault can be deleted only if # Arguments - `backup_vault_name`: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. + Amazon Web Services Region where they are created. """ function delete_backup_vault( @@ -782,7 +759,7 @@ Deletes event notifications for the specified backup vault. # Arguments - `backup_vault_name`: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the - Region where they are created. They consist of lowercase letters, numbers, and hyphens. + Region where they are created. """ function delete_backup_vault_notifications( @@ -858,8 +835,7 @@ be successful and will enter an EXPIRED state. # Arguments - `backup_vault_name`: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. + Amazon Web Services Region where they are created. - `recovery_point_arn`: An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. @@ -1043,12 +1019,11 @@ Returns metadata about a backup vault specified by its name. # Arguments - `backup_vault_name`: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. + Amazon Web Services Region where they are created. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"backupVaultAccountId"`: This is the account ID of the specified backup vault. +- `"backupVaultAccountId"`: The account ID of the specified backup vault. """ function describe_backup_vault( backupVaultName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1212,15 +1187,14 @@ lifecycle. # Arguments - `backup_vault_name`: The name of a logical container where backups are stored. 
Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. + Amazon Web Services Region where they are created. - `recovery_point_arn`: An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"backupVaultAccountId"`: This is the account ID of the specified backup vault. +- `"backupVaultAccountId"`: The account ID of the specified backup vault. """ function describe_recovery_point( backupVaultName, recoveryPointArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1424,12 +1398,11 @@ This action to a specific child (nested) recovery point removes the relationship the specified recovery point and its parent (composite) recovery point. # Arguments -- `backup_vault_name`: This is the name of a logical container where the child (nested) - recovery point is stored. Backup vaults are identified by names that are unique to the - account used to create them and the Amazon Web Services Region where they are created. They - consist of lowercase letters, numbers, and hyphens. -- `recovery_point_arn`: This is the Amazon Resource Name (ARN) that uniquely identifies the - child (nested) recovery point; for example, +- `backup_vault_name`: The name of a logical container where the child (nested) recovery + point is stored. Backup vaults are identified by names that are unique to the account used + to create them and the Amazon Web Services Region where they are created. +- `recovery_point_arn`: The Amazon Resource Name (ARN) that uniquely identifies the child + (nested) recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. """ @@ -1651,8 +1624,7 @@ Returns the access policy document that is associated with the named backup vaul # Arguments - `backup_vault_name`: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. + Amazon Web Services Region where they are created. """ function get_backup_vault_access_policy( @@ -1688,8 +1660,7 @@ Returns event notifications for the specified backup vault. # Arguments - `backup_vault_name`: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. + Amazon Web Services Region where they are created. """ function get_backup_vault_notifications( @@ -1724,8 +1695,7 @@ This action returns details for a specified legal hold. The details are the body hold in JSON format, in addition to metadata. # Arguments -- `legal_hold_id`: This is the ID required to use GetLegalHold. This unique ID is - associated with a specific legal hold. +- `legal_hold_id`: The ID of the legal hold. """ function get_legal_hold(legalHoldId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -1759,15 +1729,14 @@ Returns a set of metadata key-value pairs that were used to create the backup. 
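A usage sketch, reusing the docstring's example recovery point ARN (the vault name is
illustrative):

    using AWS
    @service Backup

    # Fetch the restore metadata captured when the recovery point was created.
    metadata = Backup.get_recovery_point_restore_metadata(
        "my-backup-vault",
        "arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45",
    )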
# Arguments - `backup_vault_name`: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. + Amazon Web Services Region where they are created. - `recovery_point_arn`: An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"backupVaultAccountId"`: This is the account ID of the specified backup vault. +- `"backupVaultAccountId"`: The account ID of the specified backup vault. """ function get_recovery_point_restore_metadata( backupVaultName, recoveryPointArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1847,7 +1816,7 @@ BackupVaultAccountId is an optional parameter. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"BackupVaultAccountId"`: This is the account ID of the specified backup vault. +- `"BackupVaultAccountId"`: The account ID of the specified backup vault. """ function get_restore_testing_inferred_metadata( BackupVaultName, RecoveryPointArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -2005,12 +1974,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys accounts can use the value ANY to return job counts from every account in the organization. AGGREGATE_ALL aggregates job counts from all accounts within the authenticated organization, then returns the sum. -- `"AggregationPeriod"`: This is the period that sets the boundaries for returned results. - Acceptable values include ONE_DAY for daily job count for the prior 14 days. - SEVEN_DAYS for the aggregated job count for the prior 7 days. FOURTEEN_DAYS for - aggregated job count for prior 14 days. -- `"MaxResults"`: This parameter sets the maximum number of items to be returned. The value - is an integer. Range of accepted values is from 1 to 500. +- `"AggregationPeriod"`: The period for the returned results. ONE_DAY - The daily job + count for the prior 14 days. SEVEN_DAYS - The aggregated job count for the prior 7 days. + FOURTEEN_DAYS - The aggregated job count for prior 14 days. +- `"MaxResults"`: The maximum number of items to be returned. The value is an integer. + Range of accepted values is from 1 to 500. - `"MessageCategory"`: This parameter returns the job count for the specified message category. Example accepted strings include AccessDenied, Success, and InvalidParameters. See Monitoring for a list of accepted MessageCategory strings. The the value ANY returns @@ -2070,8 +2038,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys returns all jobs across the organization. - `"backupVaultName"`: Returns only backup jobs that will be stored in the specified backup vault. Backup vaults are identified by names that are unique to the account used to create - them and the Amazon Web Services Region where they are created. They consist of lowercase - letters, numbers, and hyphens. + them and the Amazon Web Services Region where they are created. - `"completeAfter"`: Returns only backup jobs completed after a date expressed in Unix format and Coordinated Universal Time (UTC). 
- `"completeBefore"`: Returns only backup jobs completed before a date expressed in Unix @@ -2094,10 +2061,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System - FSx for Amazon FSx Neptune for Amazon Neptune Redshift for Amazon Redshift RDS - for Amazon Relational Database Service SAP HANA on Amazon EC2 for SAP HANA databases - Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream for Amazon Timestream - VirtualMachine for virtual machines + FSx for Amazon FSx Neptune for Amazon Neptune RDS for Amazon Relational Database + Service Redshift for Amazon Redshift S3 for Amazon Simple Storage Service (Amazon S3) + SAP HANA on Amazon EC2 for SAP HANA databases on Amazon Elastic Compute Cloud instances + Storage Gateway for Storage Gateway Timestream for Amazon Timestream VirtualMachine + for VMware virtual machines - `"state"`: Returns only backup jobs that are in the specified state. Completed with issues is a status found only in the Backup console. For API, this status refers to jobs with a state of COMPLETED and a MessageCategory with a value other than SUCCESS; that is, @@ -2126,12 +2094,11 @@ end list_backup_plan_templates() list_backup_plan_templates(params::Dict{String,<:Any}) -Returns metadata of your saved backup plan templates, including the template ID, name, and -the creation and deletion dates. +Lists the backup plan templates. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of items to be returned. +- `"maxResults"`: The maximum number of items to return. - `"nextToken"`: The next item following a partial list of returned items. For example, if a request is made to return MaxResults number of items, NextToken allows you to return more items in your list starting at the location pointed to by the next token. @@ -2201,9 +2168,7 @@ end list_backup_plans() list_backup_plans(params::Dict{String,<:Any}) -Returns a list of all active backup plans for an authenticated account. The list contains -information such as Amazon Resource Names (ARNs), plan IDs, creation and deletion dates, -version IDs, plan names, and creator request IDs. +Lists the active backup plans for the account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2322,9 +2287,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys accounts can use the value ANY to return job counts from every account in the organization. AGGREGATE_ALL aggregates job counts from all accounts within the authenticated organization, then returns the sum. -- `"AggregationPeriod"`: This is the period that sets the boundaries for returned results. - ONE_DAY for daily job count for the prior 14 days. SEVEN_DAYS for the aggregated job - count for the prior 7 days. FOURTEEN_DAYS for aggregated job count for prior 14 days. +- `"AggregationPeriod"`: The period for the returned results. ONE_DAY - The daily job + count for the prior 14 days. SEVEN_DAYS - The aggregated job count for the prior 7 days. + FOURTEEN_DAYS - The aggregated job count for prior 14 days. - `"MaxResults"`: This parameter sets the maximum number of items to be returned. The value is an integer. 
Range of accepted values is from 1 to 500. - `"MessageCategory"`: This parameter returns the job count for the specified message @@ -2383,7 +2348,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"createdBefore"`: Returns only copy jobs that were created before the specified date. - `"destinationVaultArn"`: An Amazon Resource Name (ARN) that uniquely identifies a source backup vault to copy from; for example, - arn:aws:backup:us-east-1:123456789012:vault:aBackupVault. + arn:aws:backup:us-east-1:123456789012:backup-vault:aBackupVault. - `"maxResults"`: The maximum number of items to be returned. - `"messageCategory"`: This is an optional parameter that can be used to filter out jobs with a MessageCategory which matches the value you input. Example strings may include @@ -2400,10 +2365,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File System - FSx for Amazon FSx Neptune for Amazon Neptune Redshift for Amazon Redshift RDS - for Amazon Relational Database Service SAP HANA on Amazon EC2 for SAP HANA databases - Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream for Amazon Timestream - VirtualMachine for virtual machines + FSx for Amazon FSx Neptune for Amazon Neptune RDS for Amazon Relational Database + Service Redshift for Amazon Redshift S3 for Amazon Simple Storage Service (Amazon S3) + SAP HANA on Amazon EC2 for SAP HANA databases on Amazon Elastic Compute Cloud instances + Storage Gateway for Storage Gateway Timestream for Amazon Timestream VirtualMachine + for VMware virtual machines - `"state"`: Returns only copy jobs that are in the specified state. """ function list_copy_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -2514,13 +2480,13 @@ end This request lists the protected resources corresponding to each backup vault. # Arguments -- `backup_vault_name`: This is the list of protected resources by backup vault within the - vault(s) you specify by name. +- `backup_vault_name`: The list of protected resources by backup vault within the vault(s) + you specify by name. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"backupVaultAccountId"`: This is the list of protected resources by backup vault within - the vault(s) you specify by account ID. +- `"backupVaultAccountId"`: The list of protected resources by backup vault within the + vault(s) you specify by account ID. - `"maxResults"`: The maximum number of items to be returned. - `"nextToken"`: The next item following a partial list of returned items. For example, if a request is made to return MaxResults number of items, NextToken allows you to return more @@ -2559,9 +2525,8 @@ Returns detailed information about the recovery points stored in a backup vault. # Arguments - `backup_vault_name`: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. Backup vault name might not be available when a supported service - creates the backup. + Amazon Web Services Region where they are created. 
Backup vault name might not be + available when a supported service creates the backup. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2584,10 +2549,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon Elastic File - System FSx for Amazon FSx Neptune for Amazon Neptune Redshift for Amazon Redshift - RDS for Amazon Relational Database Service SAP HANA on Amazon EC2 for SAP HANA - databases Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream for - Amazon Timestream VirtualMachine for virtual machines + System FSx for Amazon FSx Neptune for Amazon Neptune RDS for Amazon Relational + Database Service Redshift for Amazon Redshift S3 for Amazon Simple Storage Service + (Amazon S3) SAP HANA on Amazon EC2 for SAP HANA databases on Amazon Elastic Compute + Cloud instances Storage Gateway for Storage Gateway Timestream for Amazon Timestream + VirtualMachine for VMware virtual machines """ function list_recovery_points_by_backup_vault( backupVaultName; aws_config::AbstractAWSConfig=global_aws_config() @@ -2620,14 +2586,14 @@ end This action returns recovery point ARNs (Amazon Resource Names) of the specified legal hold. # Arguments -- `legal_hold_id`: This is the ID of the legal hold. +- `legal_hold_id`: The ID of the legal hold. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: This is the maximum number of resource list items to be returned. -- `"nextToken"`: This is the next item following a partial list of returned resources. For - example, if a request is made to return MaxResults number of resources, NextToken allows - you to return more items in your list starting at the location pointed to by the next token. +- `"maxResults"`: The maximum number of resource list items to be returned. +- `"nextToken"`: The next item following a partial list of returned resources. For example, + if a request is made to return MaxResults number of resources, NextToken allows you to + return more items in your list starting at the location pointed to by the next token. """ function list_recovery_points_by_legal_hold( legalHoldId; aws_config::AbstractAWSConfig=global_aws_config() @@ -2657,9 +2623,9 @@ end list_recovery_points_by_resource(resource_arn) list_recovery_points_by_resource(resource_arn, params::Dict{String,<:Any}) -Returns detailed information about all the recovery points of the type specified by a -resource Amazon Resource Name (ARN). For Amazon EFS and Amazon EC2, this action only lists -recovery points created by Backup. +The information about the recovery points of the type specified by a resource Amazon +Resource Name (ARN). For Amazon EFS and Amazon EC2, this action only lists recovery points +created by Backup. # Arguments - `resource_arn`: An ARN that uniquely identifies a resource. The format of the ARN depends @@ -2789,10 +2755,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys accounts can use the value ANY to return job counts from every account in the organization. AGGREGATE_ALL aggregates job counts from all accounts within the authenticated organization, then returns the sum. 
-- `"AggregationPeriod"`: This is the period that sets the boundaries for returned results. - Acceptable values include ONE_DAY for daily job count for the prior 14 days. - SEVEN_DAYS for the aggregated job count for the prior 7 days. FOURTEEN_DAYS for - aggregated job count for prior 14 days. +- `"AggregationPeriod"`: The period for the returned results. ONE_DAY - The daily job + count for the prior 14 days. SEVEN_DAYS - The aggregated job count for the prior 7 days. + FOURTEEN_DAYS - The aggregated job count for prior 14 days. - `"MaxResults"`: This parameter sets the maximum number of items to be returned. The value is an integer. Range of accepted values is from 1 to 500. - `"NextToken"`: The next item following a partial list of returned resources. For example, @@ -2853,10 +2818,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys resources: Aurora for Amazon Aurora CloudFormation for CloudFormation DocumentDB for Amazon DocumentDB (with MongoDB compatibility) DynamoDB for Amazon DynamoDB EBS for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon - Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune Redshift for - Amazon Redshift RDS for Amazon Relational Database Service SAP HANA on Amazon EC2 for - SAP HANA databases Storage Gateway for Storage Gateway S3 for Amazon S3 Timestream - for Amazon Timestream VirtualMachine for virtual machines + Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune RDS for Amazon + Relational Database Service Redshift for Amazon Redshift S3 for Amazon Simple Storage + Service (Amazon S3) SAP HANA on Amazon EC2 for SAP HANA databases on Amazon Elastic + Compute Cloud instances Storage Gateway for Storage Gateway Timestream for Amazon + Timestream VirtualMachine for VMware virtual machines - `"restoreTestingPlanArn"`: This returns only restore testing jobs that match the specified resource Amazon Resource Name (ARN). - `"status"`: Returns only restore jobs associated with the specified job status. @@ -3005,10 +2971,8 @@ end list_tags(resource_arn) list_tags(resource_arn, params::Dict{String,<:Any}) -Returns a list of key-value pairs assigned to a target recovery point, backup plan, or -backup vault. ListTags only works for resource types that support full Backup management -of their backups. Those resource types are listed in the \"Full Backup management\" section -of the Feature availability by resource table. +Returns the tags assigned to the resource, such as a target recovery point, backup plan, or +backup vault. # Arguments - `resource_arn`: An Amazon Resource Name (ARN) that uniquely identifies a resource. The @@ -3054,8 +3018,7 @@ vault. Requires a backup vault name and an access policy document in JSON format # Arguments - `backup_vault_name`: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. + Amazon Web Services Region where they are created. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3096,7 +3059,8 @@ stored in a backup vault. If specified, Vault Lock enforces a minimum and maximu period for future backup and copy jobs that target a backup vault. 
Backup Vault Lock has been assessed by Cohasset Associates for use in environments that are subject to SEC 17a-4, CFTC, and FINRA regulations. For more information about how Backup Vault Lock relates to -these regulations, see the Cohasset Associates Compliance Assessment. +these regulations, see the Cohasset Associates Compliance Assessment. For more +information, see Backup Vault Lock. # Arguments - `backup_vault_name`: The Backup Vault Lock configuration that specifies the name of the @@ -3131,14 +3095,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"MinRetentionDays"`: The Backup Vault Lock configuration that specifies the minimum retention period that the vault retains its recovery points. This setting can be useful if, for example, your organization's policies require you to retain certain data for at least - seven years (2555 days). If this parameter is not specified, Vault Lock will not enforce a - minimum retention period. If this parameter is specified, any backup or copy job to the - vault must have a lifecycle policy with a retention period equal to or longer than the - minimum retention period. If the job's retention period is shorter than that minimum - retention period, then the vault fails that backup or copy job, and you should either - modify your lifecycle settings or use a different vault. The shortest minimum retention - period you can specify is 1 day. Recovery points already saved in the vault prior to Vault - Lock are not affected. + seven years (2555 days). This parameter is required when a vault lock is created through + CloudFormation; otherwise, this parameter is optional. If this parameter is not specified, + Vault Lock will not enforce a minimum retention period. If this parameter is specified, any + backup or copy job to the vault must have a lifecycle policy with a retention period equal + to or longer than the minimum retention period. If the job's retention period is shorter + than that minimum retention period, then the vault fails that backup or copy job, and you + should either modify your lifecycle settings or use a different vault. The shortest minimum + retention period you can specify is 1 day. Recovery points already saved in the vault prior + to Vault Lock are not affected. """ function put_backup_vault_lock_configuration( backupVaultName; aws_config::AbstractAWSConfig=global_aws_config() @@ -3176,16 +3141,15 @@ Turns on notifications on a backup vault for the specified topic and events. to track Backup events. The following events are supported: BACKUP_JOB_STARTED | BACKUP_JOB_COMPLETED COPY_JOB_STARTED | COPY_JOB_SUCCESSFUL | COPY_JOB_FAILED RESTORE_JOB_STARTED | RESTORE_JOB_COMPLETED | RECOVERY_POINT_MODIFIED - S3_BACKUP_OBJECT_FAILED | S3_RESTORE_OBJECT_FAILED The list below shows items that are - deprecated events (for reference) and are no longer in use. They are no longer supported - and will not return statuses or notifications. Refer to the list above for current + S3_BACKUP_OBJECT_FAILED | S3_RESTORE_OBJECT_FAILED The list below includes both + supported events and deprecated events that are no longer in use (for reference). + Deprecated events do not return statuses or notifications. Refer to the list above for the supported events. - `snstopic_arn`: The Amazon Resource Name (ARN) that specifies the topic for a backup vault’s events; for example, arn:aws:sns:us-west-2:111122223333:MyVaultTopic. - `backup_vault_name`: The name of a logical container where backups are stored. 
Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. + Amazon Web Services Region where they are created. """ function put_backup_vault_notifications( @@ -3237,7 +3201,7 @@ RestoreJobId and ValidationStatus are required. Optionally, you can input a ValidationStatusMessage. # Arguments -- `validation_status`: This is the status of your restore validation. +- `validation_status`: The status of your restore validation. - `restore_job_id`: This is a unique identifier of a restore job within Backup. # Optional Parameters @@ -3284,8 +3248,7 @@ Starts an on-demand backup job for the specified resource. # Arguments - `backup_vault_name`: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. + Amazon Web Services Region where they are created. - `iam_role_arn`: Specifies the IAM role ARN used to create the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access. - `resource_arn`: An Amazon Resource Name (ARN) that uniquely identifies a resource. The @@ -3293,11 +3256,11 @@ Starts an on-demand backup job for the specified resource. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"BackupOptions"`: Specifies the backup option for a selected resource. This option is - only available for Windows Volume Shadow Copy Service (VSS) backup jobs. Valid values: Set - to \"WindowsVSS\":\"enabled\" to enable the WindowsVSS backup option and create a Windows - VSS backup. Set to \"WindowsVSS\"\"disabled\" to create a regular backup. The WindowsVSS - option is not enabled by default. +- `"BackupOptions"`: The backup option for a selected resource. This option is only + available for Windows Volume Shadow Copy Service (VSS) backup jobs. Valid values: Set to + \"WindowsVSS\":\"enabled\" to enable the WindowsVSS backup option and create a Windows VSS + backup. Set to \"WindowsVSS\"\"disabled\" to create a regular backup. The WindowsVSS option + is not enabled by default. - `"CompleteWindowMinutes"`: A value in minutes during which a successfully started backup must complete, or else Backup will cancel the job. This value is optional. This value begins counting down from when the backup was scheduled. It does not add additional time @@ -3312,12 +3275,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys stored in cold storage for a minimum of 90 days. Therefore, the “retention” setting must be 90 days greater than the “transition to cold after days” setting. The “transition to cold after days” setting cannot be changed after a backup has been - transitioned to cold. Resource types that are able to be transitioned to cold storage are - listed in the \"Lifecycle to cold storage\" section of the Feature availability by - resource table. Backup ignores this expression for other resource types. This parameter has - a maximum value of 100 years (36,500 days). -- `"RecoveryPointTags"`: To help organize your resources, you can assign your own metadata - to the resources that you create. Each tag is a key-value pair. + transitioned to cold. Resource types that can transition to cold storage are listed in the + Feature availability by resource table. 
Backup ignores this expression for other resource + types. This parameter has a maximum value of 100 years (36,500 days). +- `"RecoveryPointTags"`: The tags to assign to the resources. - `"StartWindowMinutes"`: A value in minutes after a backup is scheduled before a job will be canceled if it doesn't start successfully. This value is optional, and the default is 8 hours. If this value is included, it must be at least 60 minutes to avoid errors. This @@ -3383,7 +3344,7 @@ continuous backups. # Arguments - `destination_backup_vault_arn`: An Amazon Resource Name (ARN) that uniquely identifies a destination backup vault to copy to; for example, - arn:aws:backup:us-east-1:123456789012:vault:aBackupVault. + arn:aws:backup:us-east-1:123456789012:backup-vault:aBackupVault. - `iam_role_arn`: Specifies the IAM role ARN used to copy the target recovery point; for example, arn:aws:iam::123456789012:role/S3Access. - `recovery_point_arn`: An ARN that uniquely identifies a recovery point to use for the @@ -3391,8 +3352,7 @@ continuous backups. arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. - `source_backup_vault_name`: The name of a logical source container where backups are stored. Backup vaults are identified by names that are unique to the account used to create - them and the Amazon Web Services Region where they are created. They consist of lowercase - letters, numbers, and hyphens. + them and the Amazon Web Services Region where they are created. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3498,25 +3458,17 @@ end Recovers the saved resource identified by an Amazon Resource Name (ARN). # Arguments -- `metadata`: A set of metadata key-value pairs. Contains information, such as a resource - name, required to restore a recovery point. You can get configuration metadata about a +- `metadata`: A set of metadata key-value pairs. You can get configuration metadata about a resource at the time it was backed up by calling GetRecoveryPointRestoreMetadata. However, values in addition to those provided by GetRecoveryPointRestoreMetadata might be required to restore a resource. For example, you might need to provide a new resource name if the - original already exists. You need to specify specific metadata to restore an Amazon Elastic - File System (Amazon EFS) instance: file-system-id: The ID of the Amazon EFS file system - that is backed up by Backup. Returned in GetRecoveryPointRestoreMetadata. Encrypted: A - Boolean value that, if true, specifies that the file system is encrypted. If KmsKeyId is - specified, Encrypted must be set to true. KmsKeyId: Specifies the Amazon Web Services - KMS key that is used to encrypt the restored file system. You can specify a key from - another Amazon Web Services account provided that key it is properly shared with your - account via Amazon Web Services KMS. PerformanceMode: Specifies the throughput mode of - the file system. CreationToken: A user-supplied value that ensures the uniqueness - (idempotency) of the request. newFileSystem: A Boolean value that, if true, specifies - that the recovery point is restored to a new Amazon EFS file system. ItemsToRestore: An - array of one to five strings where each string is a file path. Use ItemsToRestore to - restore specific files or directories rather than the entire file system. This parameter is - optional. For example, \"itemsToRestore\":\"[\"/my.test\"]\". + original already exists. 
For more information about the metadata for each resource, see the
+  following: Metadata for Amazon Aurora Metadata for Amazon DocumentDB Metadata
+  for CloudFormation Metadata for Amazon DynamoDB Metadata for Amazon EBS
+  Metadata for Amazon EC2 Metadata for Amazon EFS Metadata for Amazon FSx
+  Metadata for Amazon Neptune Metadata for Amazon RDS Metadata for Amazon Redshift
+  Metadata for Storage Gateway Metadata for Amazon S3 Metadata for Amazon Timestream
+  Metadata for virtual machines
- `recovery_point_arn`: An ARN that uniquely identifies a recovery point; for example,
  arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45.
@@ -3531,13 +3483,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
  otherwise identical calls to StartRestoreJob. Retrying a successful request with the same
  idempotency token results in a success message with no action taken.
- `"ResourceType"`: Starts a job to restore a recovery point for one of the following
-  resources: Aurora for Amazon Aurora DocumentDB for Amazon DocumentDB (with MongoDB
-  compatibility) CloudFormation for CloudFormation DynamoDB for Amazon DynamoDB EBS
-  for Amazon Elastic Block Store EC2 for Amazon Elastic Compute Cloud EFS for Amazon
-  Elastic File System FSx for Amazon FSx Neptune for Amazon Neptune RDS for Amazon
-  Relational Database Service Redshift for Amazon Redshift Storage Gateway for Storage
-  Gateway S3 for Amazon S3 Timestream for Amazon Timestream VirtualMachine for
-  virtual machines
+  resources: Aurora - Amazon Aurora DocumentDB - Amazon DocumentDB CloudFormation -
+  CloudFormation DynamoDB - Amazon DynamoDB EBS - Amazon Elastic Block Store EC2 -
+  Amazon Elastic Compute Cloud EFS - Amazon Elastic File System FSx - Amazon FSx
+  Neptune - Amazon Neptune RDS - Amazon Relational Database Service Redshift - Amazon
+  Redshift Storage Gateway - Storage Gateway S3 - Amazon Simple Storage Service
+  Timestream - Amazon Timestream VirtualMachine - Virtual machines
"""
function start_restore_job(
    Metadata, RecoveryPointArn; aws_config::AbstractAWSConfig=global_aws_config()
@@ -3579,7 +3530,7 @@ end

Attempts to cancel a job to create a one-time backup of a resource. This action is not
supported for the following services: Amazon FSx for Windows File Server, Amazon FSx for
-Lustre, Amazon FSx for NetApp ONTAP , Amazon FSx for OpenZFS, Amazon DocumentDB (with
+Lustre, Amazon FSx for NetApp ONTAP, Amazon FSx for OpenZFS, Amazon DocumentDB (with
MongoDB compatibility), Amazon RDS, Amazon Aurora, and Amazon Neptune.

# Arguments
@@ -3613,14 +3564,19 @@ end
 tag_resource(tags, resource_arn, params::Dict{String,<:Any})

Assigns a set of key-value pairs to a recovery point, backup plan, or backup vault
-identified by an Amazon Resource Name (ARN).
+identified by an Amazon Resource Name (ARN). This API is supported for recovery points for
+resource types including Aurora, Amazon DocumentDB, Amazon EBS, Amazon FSx, Neptune, and
+Amazon RDS.

# Arguments
- `tags`: Key-value pairs that are used to help organize your resources. You can assign
  your own metadata to the resources you create. For clarity, this is the structure to assign
  tags: [{\"Key\":\"string\",\"Value\":\"string\"}].
- `resource_arn`: An ARN that uniquely identifies a resource. The format of the ARN depends
-  on the type of the tagged resource.
+  on the type of the tagged resource. ARNs that do not include backup are incompatible with
+  tagging. TagResource and UntagResource with invalid ARNs will result in an error.
+  Acceptable ARN content can include arn:aws:backup:us-east. Invalid ARN content may look
+  like arn:aws:ec2:us-east.

"""
function tag_resource(Tags, resourceArn; aws_config::AbstractAWSConfig=global_aws_config())
@@ -3652,12 +3608,17 @@ end
 untag_resource(tag_key_list, resource_arn, params::Dict{String,<:Any})

Removes a set of key-value pairs from a recovery point, backup plan, or backup vault
-identified by an Amazon Resource Name (ARN)
+identified by an Amazon Resource Name (ARN). This API is not supported for recovery points
+for resource types including Aurora, Amazon DocumentDB, Amazon EBS, Amazon FSx, Neptune,
+and Amazon RDS.

# Arguments
-- `tag_key_list`: A list of keys to identify which key-value tags to remove from a resource.
+- `tag_key_list`: The keys to identify which key-value tags to remove from a resource.
- `resource_arn`: An ARN that uniquely identifies a resource. The format of the ARN depends
-  on the type of the tagged resource.
+  on the type of the tagged resource. ARNs that do not include backup are incompatible with
+  tagging. TagResource and UntagResource with invalid ARNs will result in an error.
+  Acceptable ARN content can include arn:aws:backup:us-east. Invalid ARN content may look
+  like arn:aws:ec2:us-east.

"""
function untag_resource(
@@ -3692,13 +3653,12 @@ end
 update_backup_plan(backup_plan, backup_plan_id)
 update_backup_plan(backup_plan, backup_plan_id, params::Dict{String,<:Any})

-Updates an existing backup plan identified by its backupPlanId with the input document in
-JSON format. The new version is uniquely identified by a VersionId.
+Updates the specified backup plan. The new version is uniquely identified by its ID.

# Arguments
-- `backup_plan`: Specifies the body of a backup plan. Includes a BackupPlanName and one or
-  more sets of Rules.
-- `backup_plan_id`: Uniquely identifies a backup plan.
+- `backup_plan`: The body of a backup plan. Includes a BackupPlanName and one or more sets
+  of Rules.
+- `backup_plan_id`: The ID of the backup plan.

"""
function update_backup_plan(
@@ -3733,8 +3693,7 @@ end
 update_framework(framework_name)
 update_framework(framework_name, params::Dict{String,<:Any})

-Updates an existing framework identified by its FrameworkName with the input document in
-JSON format.
+Updates the specified framework.

# Arguments
- `framework_name`: The unique name of a framework. This name is between 1 and 256

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"FrameworkControls"`: A list of the controls that make up the framework. Each control in
-  the list has a name, input parameters, and scope.
+- `"FrameworkControls"`: The controls that make up the framework. Each control in the list
+  has a name, input parameters, and scope.
- `"FrameworkDescription"`: An optional description of the framework with a maximum 1,024
  characters.
- `"IdempotencyToken"`: A customer-chosen string that you can use to distinguish between
@@ -3815,20 +3774,21 @@ end

Sets the transition lifecycle of a recovery point. The lifecycle defines when a protected
resource is transitioned to cold storage and when it expires. Backup transitions and
-expires backups automatically according to the lifecycle that you define. Backups
-transitioned to cold storage must be stored in cold storage for a minimum of 90 days.
-Therefore, the “retention” setting must be 90 days greater than the “transition to
-cold after days” setting.
The “transition to cold after days” setting cannot be -changed after a backup has been transitioned to cold. Resource types that are able to be -transitioned to cold storage are listed in the \"Lifecycle to cold storage\" section of the - Feature availability by resource table. Backup ignores this expression for other resource -types. This operation does not support continuous backups. +expires backups automatically according to the lifecycle that you define. Resource types +that can transition to cold storage are listed in the Feature availability by resource +table. Backup ignores this expression for other resource types. Backups transitioned to +cold storage must be stored in cold storage for a minimum of 90 days. Therefore, the +“retention” setting must be 90 days greater than the “transition to cold after +days” setting. The “transition to cold after days” setting cannot be changed after a +backup has been transitioned to cold. If your lifecycle currently uses the parameters +DeleteAfterDays and MoveToColdStorageAfterDays, include these parameters and their values +when you call this operation. Not including them may result in your plan updating with null +values. This operation does not support continuous backups. # Arguments - `backup_vault_name`: The name of a logical container where backups are stored. Backup vaults are identified by names that are unique to the account used to create them and the - Amazon Web Services Region where they are created. They consist of lowercase letters, - numbers, and hyphens. + Amazon Web Services Region where they are created. - `recovery_point_arn`: An Amazon Resource Name (ARN) that uniquely identifies a recovery point; for example, arn:aws:backup:us-east-1:123456789012:recovery-point:1EB3B5E7-9EB0-435A-A80B-108B488B0D45. @@ -3909,8 +3869,7 @@ end update_report_plan(report_plan_name) update_report_plan(report_plan_name, params::Dict{String,<:Any}) -Updates an existing report plan identified by its ReportPlanName with the input document in -JSON format. +Updates the specified report plan. # Arguments - `report_plan_name`: The unique name of the report plan. This name is between 1 and 256 @@ -3922,16 +3881,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"IdempotencyToken"`: A customer-chosen string that you can use to distinguish between otherwise identical calls to UpdateReportPlanInput. Retrying a successful request with the same idempotency token results in a success message with no action taken. -- `"ReportDeliveryChannel"`: A structure that contains information about where to deliver - your reports, specifically your Amazon S3 bucket name, S3 key prefix, and the formats of - your reports. +- `"ReportDeliveryChannel"`: The information about where to deliver your reports, + specifically your Amazon S3 bucket name, S3 key prefix, and the formats of your reports. - `"ReportPlanDescription"`: An optional description of the report plan with a maximum 1,024 characters. -- `"ReportSetting"`: Identifies the report template for the report. Reports are built using - a report template. The report templates are: RESOURCE_COMPLIANCE_REPORT | - CONTROL_COMPLIANCE_REPORT | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT If - the report template is RESOURCE_COMPLIANCE_REPORT or CONTROL_COMPLIANCE_REPORT, this API - resource also describes the report coverage by Amazon Web Services Regions and frameworks. +- `"ReportSetting"`: The report template for the report. Reports are built using a report + template. 
The report templates are: RESOURCE_COMPLIANCE_REPORT | CONTROL_COMPLIANCE_REPORT
+  | BACKUP_JOB_REPORT | COPY_JOB_REPORT | RESTORE_JOB_REPORT If the report template is
+  RESOURCE_COMPLIANCE_REPORT or CONTROL_COMPLIANCE_REPORT, this API resource also describes
+  the report coverage by Amazon Web Services Regions and frameworks.
"""
function update_report_plan(
    reportPlanName; aws_config::AbstractAWSConfig=global_aws_config()
@@ -3973,7 +3931,7 @@ SelectionWindowDays

# Arguments
- `restore_testing_plan`: Specifies the body of a restore testing plan.
-- `restore_testing_plan_name`: This is the restore testing plan name you wish to update.
+- `restore_testing_plan_name`: The name of the restore testing plan to update.

"""
function update_restore_testing_plan(
@@ -4012,10 +3970,9 @@ end
 update_restore_testing_selection(restore_testing_plan_name, restore_testing_selection, restore_testing_selection_name)
 update_restore_testing_selection(restore_testing_plan_name, restore_testing_selection, restore_testing_selection_name, params::Dict{String,<:Any})

-Most elements except the RestoreTestingSelectionName can be updated with this request.
-RestoreTestingSelection can use either protected resource ARNs or conditions, but not both.
-That is, if your selection has ProtectedResourceArns, requesting an update with the
-parameter ProtectedResourceConditions will be unsuccessful.
+Updates the specified restore testing selection. Most elements except the
+RestoreTestingSelectionName can be updated with this request. You can use either protected
+resource ARNs or conditions, but not both.

# Arguments
- `restore_testing_plan_name`: The restore testing plan name is required to update the
@@ -4024,8 +3981,8 @@ parameter ProtectedResourceConditions will be unsuccessful.
  protected resource ARNs or conditions, but not both. That is, if your selection has
  ProtectedResourceArns, requesting an update with the parameter ProtectedResourceConditions
  will be unsuccessful.
-- `restore_testing_selection_name`: This is the required restore testing selection name of
-  the restore testing selection you wish to update.
+- `restore_testing_selection_name`: The name of the restore testing selection you wish to
+  update.

"""
function update_restore_testing_selection(
diff --git a/src/services/batch.jl b/src/services/batch.jl
index a9a4cb5563..33e5033cc7 100644
--- a/src/services/batch.jl
+++ b/src/services/batch.jl
@@ -8,15 +8,14 @@ using AWS.UUIDs
 cancel_job(job_id, reason)
 cancel_job(job_id, reason, params::Dict{String,<:Any})

-Cancels a job in an Batch job queue. Jobs that are in the SUBMITTED or PENDING are
-canceled. A job inRUNNABLE remains in RUNNABLE until it reaches the head of the job queue.
-Then the job status is updated to FAILED. A PENDING job is canceled after all dependency
-jobs are completed. Therefore, it may take longer than expected to cancel a job in PENDING
-status. When you try to cancel an array parent job in PENDING, Batch attempts to cancel all
-child jobs. The array parent job is canceled when all child jobs are completed. Jobs that
-progressed to the STARTING or RUNNING state aren't canceled. However, the API operation
-still succeeds, even if no job is canceled. These jobs must be terminated with the
-TerminateJob operation.
+Cancels a job in a Batch job queue. Jobs that are in a SUBMITTED, PENDING, or RUNNABLE
+state are canceled and the job status is updated to FAILED. A PENDING job is canceled
+after all dependency jobs are completed.
Therefore, it may take longer than expected to
+cancel a job in PENDING status. When you try to cancel an array parent job in PENDING,
+Batch attempts to cancel all child jobs. The array parent job is canceled when all child
+jobs are completed. Jobs that progressed to the STARTING or RUNNING state aren't canceled.
+However, the API operation still succeeds, even if no job is canceled. These jobs must be
+terminated with the TerminateJob operation.

# Arguments
- `job_id`: The Batch job ID of the job to cancel.
@@ -122,6 +121,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"computeResources"`: Details about the compute resources managed by the compute
  environment. This parameter is required for managed compute environments. For more
  information, see Compute Environments in the Batch User Guide.
+- `"context"`: Reserved.
- `"eksConfiguration"`: The details for the Amazon EKS cluster that supports the compute
  environment.
- `"serviceRole"`: The full Amazon Resource Name (ARN) of the IAM role that allows Batch to
@@ -1245,6 +1245,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"computeResources"`: Details of the compute resources managed by the compute
  environment. Required for a managed compute environment. For more information, see Compute
  Environments in the Batch User Guide.
+- `"context"`: Reserved.
- `"serviceRole"`: The full Amazon Resource Name (ARN) of the IAM role that allows Batch to
  make calls to other Amazon Web Services services on your behalf. For more information, see
  Batch service IAM role in the Batch User Guide. If the compute environment has a
diff --git a/src/services/bedrock.jl b/src/services/bedrock.jl
index d4d5f6aad1..634d0ef5bd 100644
--- a/src/services/bedrock.jl
+++ b/src/services/bedrock.jl
@@ -4,13 +4,52 @@ using AWS.AWSServices: bedrock
 using AWS.Compat
 using AWS.UUIDs

+"""
+    batch_delete_evaluation_job(job_identifiers)
+    batch_delete_evaluation_job(job_identifiers, params::Dict{String,<:Any})
+
+Creates a batch deletion job. A model evaluation job can only be deleted if it has one of
+the following statuses: FAILED, COMPLETED, or STOPPED. You can request that up to 25 model
+evaluation jobs be deleted in a single request.
+
+# Arguments
+- `job_identifiers`: An array of model evaluation job ARNs to be deleted.
+
+"""
+function batch_delete_evaluation_job(
+    jobIdentifiers; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock(
+        "POST",
+        "/evaluation-jobs/batch-delete",
+        Dict{String,Any}("jobIdentifiers" => jobIdentifiers);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function batch_delete_evaluation_job(
+    jobIdentifiers,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock(
+        "POST",
+        "/evaluation-jobs/batch-delete",
+        Dict{String,Any}(
+            mergewith(_merge, Dict{String,Any}("jobIdentifiers" => jobIdentifiers), params)
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     create_evaluation_job(evaluation_config, inference_config, job_name, output_data_config, role_arn)
     create_evaluation_job(evaluation_config, inference_config, job_name, output_data_config, role_arn, params::Dict{String,<:Any})

 API operation for creating and managing Amazon Bedrock automatic model evaluation jobs and
 model evaluation jobs that use human workers. To learn more about the requirements for
-creating a model evaluation job see, Model evaluations.
+creating a model evaluation job, see Model evaluation.
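+
+For illustration only (an editor's sketch using AWS.jl's high-level `@service` bindings,
+not generated API reference), a minimal call might look as follows. The Dict shapes are
+hypothetical placeholders; consult the CreateEvaluationJob reference for the exact fields:
+
+    using AWS
+    @service Bedrock
+
+    # Placeholder configs; a real job needs fully populated dataset and model entries.
+    evaluation_config = Dict("automated" => Dict("datasetMetricConfigs" => []))
+    inference_config = Dict("models" => [])
+    output_config = Dict("s3Uri" => "s3://amzn-s3-demo-bucket/eval-results/")
+    role_arn = "arn:aws:iam::123456789012:role/BedrockEvalRole"
+
+    Bedrock.create_evaluation_job(
+        evaluation_config, inference_config, "my-eval-job", output_config, role_arn
+    )
+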
# Arguments - `evaluation_config`: Specifies whether the model evaluation job is automatic or uses @@ -98,22 +137,20 @@ end create_guardrail(blocked_input_messaging, blocked_outputs_messaging, name) create_guardrail(blocked_input_messaging, blocked_outputs_messaging, name, params::Dict{String,<:Any}) -Creates a guardrail to block topics and to filter out harmful content. Specify a name and -optional description. Specify messages for when the guardrail successfully blocks a -prompt or a model response in the blockedInputMessaging and blockedOutputsMessaging fields. - Specify topics for the guardrail to deny in the topicPolicyConfig object. Each -GuardrailTopicConfig object in the topicsConfig list pertains to one topic. Give a name -and description so that the guardrail can properly identify the topic. Specify DENY in -the type field. (Optional) Provide up to five prompts that you would categorize as -belonging to the topic in the examples list. Specify filter strengths for the harmful -categories defined in Amazon Bedrock in the contentPolicyConfig object. Each -GuardrailContentFilterConfig object in the filtersConfig list pertains to a harmful -category. For more information, see Content filters. For more information about the fields -in a content filter, see GuardrailContentFilterConfig. Specify the category in the type -field. Specify the strength of the filter for prompts in the inputStrength field and for -model responses in the strength field of the GuardrailContentFilterConfig. (Optional) -For security, include the ARN of a KMS key in the kmsKeyId field. (Optional) Attach any -tags to the guardrail in the tags object. For more information, see Tag resources. +Creates a guardrail to block topics and to implement safeguards for your generative AI +applications. You can configure the following policies in a guardrail to avoid undesirable +and harmful content, filter out denied topics and words, and remove sensitive information +for privacy protection. Content filters - Adjust filter strengths to block input prompts +or model responses containing harmful content. Denied topics - Define a set of topics +that are undesirable in the context of your application. These topics will be blocked if +detected in user queries or model responses. Word filters - Configure filters to block +undesirable words, phrases, and profanity. Such words can include offensive terms, +competitor names etc. Sensitive information filters - Block or mask sensitive +information such as personally identifiable information (PII) or custom regex in user +inputs and model responses. In addition to the above policies, you can also configure the +messages to be returned to the user if a user input or model response is in violation of +the policies defined in the guardrail. For more information, see Guardrails for Amazon +Bedrock in the Amazon Bedrock User Guide. # Arguments - `blocked_input_messaging`: The message to return when the guardrail blocks a prompt. @@ -128,6 +165,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency in the Amazon S3 User Guide. - `"contentPolicyConfig"`: The content filter policies to configure for the guardrail. +- `"contextualGroundingPolicyConfig"`: The contextual grounding policy configuration used + to create a guardrail. - `"description"`: A description of the guardrail. - `"kmsKeyId"`: The ARN of the KMS key that you use to encrypt the guardrail. 
- `"sensitiveInformationPolicyConfig"`: The sensitive information policy to configure for @@ -191,7 +230,8 @@ you are satisfied with a configuration, or to compare the configuration with ano version. # Arguments -- `guardrail_identifier`: The unique identifier of the guardrail. +- `guardrail_identifier`: The unique identifier of the guardrail. This can be an ID or the + ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -230,6 +270,67 @@ function create_guardrail_version( ) end +""" + create_model_copy_job(source_model_arn, target_model_name) + create_model_copy_job(source_model_arn, target_model_name, params::Dict{String,<:Any}) + +Copies a model to another region so that it can be used there. For more information, see +Copy models to be used in other regions in the Amazon Bedrock User Guide. + +# Arguments +- `source_model_arn`: The Amazon Resource Name (ARN) of the model to be copied. +- `target_model_name`: A name for the copied model. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientRequestToken"`: A unique, case-sensitive identifier to ensure that the API + request completes no more than one time. If this token matches a previous request, Amazon + Bedrock ignores the request, but does not return an error. For more information, see + Ensuring idempotency. +- `"modelKmsKeyId"`: The ARN of the KMS key that you use to encrypt the model copy. +- `"targetModelTags"`: Tags to associate with the target model. For more information, see + Tag resources in the Amazon Bedrock User Guide. +""" +function create_model_copy_job( + sourceModelArn, targetModelName; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "POST", + "/model-copy-jobs", + Dict{String,Any}( + "sourceModelArn" => sourceModelArn, + "targetModelName" => targetModelName, + "clientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_model_copy_job( + sourceModelArn, + targetModelName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/model-copy-jobs", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sourceModelArn" => sourceModelArn, + "targetModelName" => targetModelName, + "clientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_model_customization_job(base_model_identifier, custom_model_name, hyper_parameters, job_name, output_data_config, role_arn, training_data_config) create_model_customization_job(base_model_identifier, custom_model_name, hyper_parameters, job_name, output_data_config, role_arn, training_data_config, params::Dict{String,<:Any}) @@ -334,6 +435,163 @@ function create_model_customization_job( ) end +""" + create_model_import_job(imported_model_name, job_name, model_data_source, role_arn) + create_model_import_job(imported_model_name, job_name, model_data_source, role_arn, params::Dict{String,<:Any}) + +Creates a model import job to import model that you have customized in other environments, +such as Amazon SageMaker. For more information, see Import a customized model + +# Arguments +- `imported_model_name`: The name of the imported model. +- `job_name`: The name of the import job. +- `model_data_source`: The data source for the imported model. 
+- `role_arn`: The Amazon Resource Name (ARN) of the IAM role to use for the model import
+  job.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"clientRequestToken"`: A unique, case-sensitive identifier to ensure that the API
+  request completes no more than one time. If this token matches a previous request, Amazon
+  Bedrock ignores the request, but does not return an error. For more information, see
+  Ensuring idempotency.
+- `"importedModelKmsKeyId"`: The imported model is encrypted at rest using this key.
+- `"importedModelTags"`: Tags to attach to the imported model.
+- `"jobTags"`: Tags to attach to this import job.
+- `"vpcConfig"`: VPC configuration parameters for the private Virtual Private Cloud (VPC)
+  that contains the resources you are using for the import job.
+"""
+function create_model_import_job(
+    importedModelName,
+    jobName,
+    modelDataSource,
+    roleArn;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock(
+        "POST",
+        "/model-import-jobs",
+        Dict{String,Any}(
+            "importedModelName" => importedModelName,
+            "jobName" => jobName,
+            "modelDataSource" => modelDataSource,
+            "roleArn" => roleArn,
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function create_model_import_job(
+    importedModelName,
+    jobName,
+    modelDataSource,
+    roleArn,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock(
+        "POST",
+        "/model-import-jobs",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "importedModelName" => importedModelName,
+                    "jobName" => jobName,
+                    "modelDataSource" => modelDataSource,
+                    "roleArn" => roleArn,
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    create_model_invocation_job(input_data_config, job_name, model_id, output_data_config, role_arn)
+    create_model_invocation_job(input_data_config, job_name, model_id, output_data_config, role_arn, params::Dict{String,<:Any})
+
+Creates a batch inference job to invoke a model on multiple prompts. Format your data
+according to Format your inference data and upload it to an Amazon S3 bucket. For more
+information, see Process multiple prompts with batch inference. The response returns a
+jobArn that you can use to stop or get details about the job.
+
+# Arguments
+- `input_data_config`: Details about the location of the input to the batch inference job.
+- `job_name`: A name to give the batch inference job.
+- `model_id`: The unique identifier of the foundation model to use for the batch inference
+  job.
+- `output_data_config`: Details about the location of the output of the batch inference job.
+- `role_arn`: The Amazon Resource Name (ARN) of the service role with permissions to carry
+  out and manage batch inference. You can use the console to create a default service role or
+  follow the steps at Create a service role for batch inference.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"clientRequestToken"`: A unique, case-sensitive identifier to ensure that the API
+  request completes no more than one time. If this token matches a previous request, Amazon
+  Bedrock ignores the request, but does not return an error. For more information, see
+  Ensuring idempotency.
+- `"tags"`: Any tags to associate with the batch inference job. For more information, see
+  Tagging Amazon Bedrock resources.
+- `"timeoutDurationInHours"`: The number of hours after which to force the batch inference + job to time out. +""" +function create_model_invocation_job( + inputDataConfig, + jobName, + modelId, + outputDataConfig, + roleArn; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/model-invocation-job", + Dict{String,Any}( + "inputDataConfig" => inputDataConfig, + "jobName" => jobName, + "modelId" => modelId, + "outputDataConfig" => outputDataConfig, + "roleArn" => roleArn, + "clientRequestToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_model_invocation_job( + inputDataConfig, + jobName, + modelId, + outputDataConfig, + roleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/model-invocation-job", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputDataConfig" => inputDataConfig, + "jobName" => jobName, + "modelId" => modelId, + "outputDataConfig" => outputDataConfig, + "roleArn" => roleArn, + "clientRequestToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_provisioned_model_throughput(model_id, model_units, provisioned_model_name) create_provisioned_model_throughput(model_id, model_units, provisioned_model_name, params::Dict{String,<:Any}) @@ -461,7 +719,8 @@ guardrailIdentifier field. If you delete a guardrail, all of its versions will b guardrailIdentifier field and the version in the guardrailVersion field. # Arguments -- `guardrail_identifier`: The unique identifier of the guardrail. +- `guardrail_identifier`: The unique identifier of the guardrail. This can be an ID or the + ARN. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -491,6 +750,41 @@ function delete_guardrail( ) end +""" + delete_imported_model(model_identifier) + delete_imported_model(model_identifier, params::Dict{String,<:Any}) + +Deletes a custom model that you imported earlier. For more information, see Import a +customized model in the Amazon Bedrock User Guide. + +# Arguments +- `model_identifier`: Name of the imported model to delete. + +""" +function delete_imported_model( + modelIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "DELETE", + "/imported-models/$(modelIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_imported_model( + modelIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "DELETE", + "/imported-models/$(modelIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_model_invocation_logging_configuration() delete_model_invocation_logging_configuration(params::Dict{String,<:Any}) @@ -597,7 +891,7 @@ end get_evaluation_job(job_identifier, params::Dict{String,<:Any}) Retrieves the properties associated with a model evaluation job, including the status of -the job. For more information, see Model evaluations. +the job. For more information, see Model evaluation. # Arguments - `job_identifier`: The Amazon Resource Name (ARN) of the model evaluation job. @@ -670,6 +964,7 @@ details for the DRAFT version. # Arguments - `guardrail_identifier`: The unique identifier of the guardrail for which to get details. + This can be an ID or the ARN. 
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -700,6 +995,106 @@ function get_guardrail(
    )
end

+"""
+    get_imported_model(model_identifier)
+    get_imported_model(model_identifier, params::Dict{String,<:Any})
+
+Gets properties associated with a customized model you imported.
+
+# Arguments
+- `model_identifier`: Name or Amazon Resource Name (ARN) of the imported model.
+
+"""
+function get_imported_model(
+    modelIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock(
+        "GET",
+        "/imported-models/$(modelIdentifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_imported_model(
+    modelIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock(
+        "GET",
+        "/imported-models/$(modelIdentifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_inference_profile(inference_profile_identifier)
+    get_inference_profile(inference_profile_identifier, params::Dict{String,<:Any})
+
+Gets information about an inference profile. For more information, see the Amazon Bedrock
+User Guide.
+
+# Arguments
+- `inference_profile_identifier`: The unique identifier of the inference profile.
+
+"""
+function get_inference_profile(
+    inferenceProfileIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock(
+        "GET",
+        "/inference-profiles/$(inferenceProfileIdentifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_inference_profile(
+    inferenceProfileIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock(
+        "GET",
+        "/inference-profiles/$(inferenceProfileIdentifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_model_copy_job(job_arn)
+    get_model_copy_job(job_arn, params::Dict{String,<:Any})
+
+Retrieves information about a model copy job. For more information, see Copy models to be
+used in other regions in the Amazon Bedrock User Guide.
+
+# Arguments
+- `job_arn`: The Amazon Resource Name (ARN) of the model copy job.
+
+"""
+function get_model_copy_job(jobArn; aws_config::AbstractAWSConfig=global_aws_config())
+    return bedrock(
+        "GET",
+        "/model-copy-jobs/$(jobArn)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_model_copy_job(
+    jobArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock(
+        "GET",
+        "/model-copy-jobs/$(jobArn)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     get_model_customization_job(job_identifier)
     get_model_customization_job(job_identifier, params::Dict{String,<:Any})
@@ -735,6 +1130,76 @@ function get_model_customization_job(
    )
end

+"""
+    get_model_import_job(job_identifier)
+    get_model_import_job(job_identifier, params::Dict{String,<:Any})
+
+Retrieves the properties associated with a model import job, including the status of the
+job. For more information, see Import a customized model in the Amazon Bedrock User Guide.
+
+# Arguments
+- `job_identifier`: The identifier of the import job.
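+
+For illustration, a minimal status check (the job ARN is a hypothetical placeholder, and
+the "status" response key is an assumption based on the service's naming conventions):
+
+    using AWS
+    @service Bedrock
+
+    job = Bedrock.get_model_import_job(
+        "arn:aws:bedrock:us-east-1:123456789012:model-import-job/abcd1234"
+    )
+    println(get(job, "status", "unknown"))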
+
+"""
+function get_model_import_job(
+    jobIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock(
+        "GET",
+        "/model-import-jobs/$(jobIdentifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_model_import_job(
+    jobIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock(
+        "GET",
+        "/model-import-jobs/$(jobIdentifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_model_invocation_job(job_identifier)
+    get_model_invocation_job(job_identifier, params::Dict{String,<:Any})
+
+Gets details about a batch inference job. For more information, see View details about a
+batch inference job.
+
+# Arguments
+- `job_identifier`: The Amazon Resource Name (ARN) of the batch inference job.
+
+"""
+function get_model_invocation_job(
+    jobIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock(
+        "GET",
+        "/model-invocation-job/$(jobIdentifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_model_invocation_job(
+    jobIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock(
+        "GET",
+        "/model-invocation-job/$(jobIdentifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     get_model_invocation_logging_configuration()
     get_model_invocation_logging_configuration(params::Dict{String,<:Any})
@@ -816,10 +1281,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 - `"creationTimeBefore"`: Return custom models created before the specified time.
 - `"foundationModelArnEquals"`: Return custom models only if the foundation model Amazon
   Resource Name (ARN) matches this parameter.
-- `"maxResults"`: Maximum number of results to return in the response.
+- `"isOwned"`: Return custom models depending on whether the current account owns them
+  (true) or if they were shared with the current account (false).
+- `"maxResults"`: The maximum number of results to return in the response. If the total
+  number of results is greater than this value, use the token returned in the response in the
+  nextToken field when making another request to return the next batch of results.
 - `"nameContains"`: Return custom models only if the job name contains these characters.
-- `"nextToken"`: Continuation token from the previous response, for Amazon Bedrock to list
-  the next set of results.
+- `"nextToken"`: If the total number of results is greater than the maxResults value
+  provided in the request, enter the token returned in the nextToken field in the response in
+  this field to return the next batch of results.
 - `"sortBy"`: The field to sort by in the returned list of models.
 - `"sortOrder"`: The sort order of the results.
 """
@@ -924,7 +1394,8 @@ another ListGuardrails request to see the next batch of results.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"guardrailIdentifier"`: The unique identifier of the guardrail.
+- `"guardrailIdentifier"`: The unique identifier of the guardrail. This can be an ID or the
+  ARN.
 - `"maxResults"`: The maximum number of results to return in the response.
- `"nextToken"`: If there are more results than were returned in the response, the response returns a nextToken that you can send in another ListGuardrails request to see the next @@ -943,6 +1414,123 @@ function list_guardrails( ) end +""" + list_imported_models() + list_imported_models(params::Dict{String,<:Any}) + +Returns a list of models you've imported. You can filter the results to return based on one +or more criteria. For more information, see Import a customized model in the Amazon Bedrock +User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"creationTimeAfter"`: Return imported models that were created after the specified time. +- `"creationTimeBefore"`: Return imported models that created before the specified time. +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nameContains"`: Return imported models only if the model name contains these characters. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +- `"sortBy"`: The field to sort by in the returned list of imported models. +- `"sortOrder"`: Specifies whetehr to sort the results in ascending or descending order. +""" +function list_imported_models(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "GET", "/imported-models"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_imported_models( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/imported-models", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_inference_profiles() + list_inference_profiles(params::Dict{String,<:Any}) + +Returns a list of inference profiles that you can use. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +""" +function list_inference_profiles(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "GET", "/inference-profiles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_inference_profiles( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/inference-profiles", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_model_copy_jobs() + list_model_copy_jobs(params::Dict{String,<:Any}) + +Returns a list of model copy jobs that you have submitted. You can filter the jobs to +return based on one or more criteria. For more information, see Copy models to be used in +other regions in the Amazon Bedrock User Guide. 
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"creationTimeAfter"`: Filters for model copy jobs created after the specified time.
+- `"creationTimeBefore"`: Filters for model copy jobs created before the specified time.
+- `"maxResults"`: The maximum number of results to return in the response. If the total
+  number of results is greater than this value, use the token returned in the response in the
+  nextToken field when making another request to return the next batch of results.
+- `"nextToken"`: If the total number of results is greater than the maxResults value
+  provided in the request, enter the token returned in the nextToken field in the response in
+  this field to return the next batch of results.
+- `"outputModelNameContains"`: Filters for model copy jobs in which the name of the copied
+  model contains the string that you specify.
+- `"sortBy"`: The field to sort by in the returned list of model copy jobs.
+- `"sortOrder"`: Specifies whether to sort the results in ascending or descending order.
+- `"sourceAccountEquals"`: Filters for model copy jobs in which the account that the source
+  model belongs to is equal to the value that you specify.
+- `"sourceModelArnEquals"`: Filters for model copy jobs in which the Amazon Resource Name
+  (ARN) of the source model is equal to the value that you specify.
+- `"statusEquals"`: Filters for model copy jobs whose status matches the value that you
+  specify.
+"""
+function list_model_copy_jobs(; aws_config::AbstractAWSConfig=global_aws_config())
+    return bedrock(
+        "GET", "/model-copy-jobs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function list_model_copy_jobs(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock(
+        "GET",
+        "/model-copy-jobs",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     list_model_customization_jobs()
     list_model_customization_jobs(params::Dict{String,<:Any})
@@ -955,11 +1543,14 @@ Amazon Bedrock User Guide.
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
 - `"creationTimeAfter"`: Return customization jobs created after the specified time.
 - `"creationTimeBefore"`: Return customization jobs created before the specified time.
-- `"maxResults"`: Maximum number of results to return in the response.
+- `"maxResults"`: The maximum number of results to return in the response. If the total
+  number of results is greater than this value, use the token returned in the response in the
+  nextToken field when making another request to return the next batch of results.
 - `"nameContains"`: Return customization jobs only if the job name contains these
   characters.
-- `"nextToken"`: Continuation token from the previous response, for Amazon Bedrock to list
-  the next set of results.
+- `"nextToken"`: If the total number of results is greater than the maxResults value
+  provided in the request, enter the token returned in the nextToken field in the response in
+  this field to return the next batch of results.
 - `"sortBy"`: The field to sort by in the returned list of jobs.
 - `"sortOrder"`: The sort order of the results.
 - `"statusEquals"`: Return customization jobs with the specified status.
@@ -984,6 +1575,93 @@ function list_model_customization_jobs(
    )
end

+"""
+    list_model_import_jobs()
+    list_model_import_jobs(params::Dict{String,<:Any})
+
+Returns a list of import jobs you've submitted.
You can filter the results to return based +on one or more criteria. For more information, see Import a customized model in the Amazon +Bedrock User Guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"creationTimeAfter"`: Return import jobs that were created after the specified time. +- `"creationTimeBefore"`: Return import jobs that were created before the specified time. +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nameContains"`: Return imported jobs only if the job name contains these characters. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. +- `"sortBy"`: The field to sort by in the returned list of imported jobs. +- `"sortOrder"`: Specifies whether to sort the results in ascending or descending order. +- `"statusEquals"`: Return imported jobs with the specified status. +""" +function list_model_import_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "GET", "/model-import-jobs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_model_import_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/model-import-jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_model_invocation_jobs() + list_model_invocation_jobs(params::Dict{String,<:Any}) + +Lists all batch inference jobs in the account. For more information, see View details about +a batch inference job. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return. If there are more results than + the number that you specify, a nextToken value is returned. Use the nextToken in a request + to return the next batch of results. +- `"nameContains"`: Specify a string to filter for batch inference jobs whose names contain + the string. +- `"nextToken"`: If there were more results than the value you specified in the maxResults + field in a previous ListModelInvocationJobs request, the response would have returned a + nextToken value. To see the next batch of results, send the nextToken value in another + request. +- `"sortBy"`: An attribute by which to sort the results. +- `"sortOrder"`: Specifies whether to sort the results by ascending or descending order. +- `"statusEquals"`: Specify a status to filter for batch inference jobs whose statuses + match the string you specify. +- `"submitTimeAfter"`: Specify a time to filter for batch inference jobs that were + submitted after the time you specify. +- `"submitTimeBefore"`: Specify a time to filter for batch inference jobs that were + submitted before the time you specify. 
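+
+As a brief sketch (the status string "Submitted" is an assumption; check the API reference
+for the accepted values):
+
+    using AWS
+    @service Bedrock
+
+    # List only batch inference jobs whose status matches the filter.
+    Bedrock.list_model_invocation_jobs(Dict("statusEquals" => "Submitted"))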
+""" +function list_model_invocation_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock( + "GET", + "/model-invocation-jobs"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_model_invocation_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "GET", + "/model-invocation-jobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_provisioned_model_throughputs() list_provisioned_model_throughputs(params::Dict{String,<:Any}) @@ -1178,6 +1856,41 @@ function stop_model_customization_job( ) end +""" + stop_model_invocation_job(job_identifier) + stop_model_invocation_job(job_identifier, params::Dict{String,<:Any}) + +Stops a batch inference job. You're only charged for tokens that were already processed. +For more information, see Stop a batch inference job. + +# Arguments +- `job_identifier`: The Amazon Resource Name (ARN) of the batch inference job to stop. + +""" +function stop_model_invocation_job( + jobIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock( + "POST", + "/model-invocation-job/$(jobIdentifier)/stop"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_model_invocation_job( + jobIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock( + "POST", + "/model-invocation-job/$(jobIdentifier)/stop", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) @@ -1282,19 +1995,21 @@ filters. For more information about the fields in a content filter, see GuardrailContentFilterConfig. Specify the category in the type field. Specify the strength of the filter for prompts in the inputStrength field and for model responses in the strength field of the GuardrailContentFilterConfig. (Optional) For security, -include the ARN of a KMS key in the kmsKeyId field. (Optional) Attach any tags to the -guardrail in the tags object. For more information, see Tag resources. +include the ARN of a KMS key in the kmsKeyId field. # Arguments - `blocked_input_messaging`: The message to return when the guardrail blocks a prompt. - `blocked_outputs_messaging`: The message to return when the guardrail blocks a model response. -- `guardrail_identifier`: The unique identifier of the guardrail +- `guardrail_identifier`: The unique identifier of the guardrail. This can be an ID or the + ARN. - `name`: A name for the guardrail. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"contentPolicyConfig"`: The content policy to configure for the guardrail. +- `"contextualGroundingPolicyConfig"`: The contextual grounding policy configuration used + to update a guardrail. - `"description"`: A description of the guardrail. - `"kmsKeyId"`: The ARN of the KMS key with which to encrypt the guardrail. 
- `"sensitiveInformationPolicyConfig"`: The sensitive information policy to configure for diff --git a/src/services/bedrock_agent.jl b/src/services/bedrock_agent.jl index 5e087e7dd7..47a89f6b62 100644 --- a/src/services/bedrock_agent.jl +++ b/src/services/bedrock_agent.jl @@ -79,11 +79,15 @@ Resource Name (ARN) of the role with permissions to invoke API operations on an (Optional) customerEncryptionKeyArn – The Amazon Resource Name (ARN) of a KMS key to encrypt the creation of the agent. (Optional) idleSessionTTLinSeconds – Specify the number of seconds for which the agent should maintain session information. After this time -expires, the subsequent InvokeAgent request begins a new session. To override the -default prompt behavior for agent orchestration and to use advanced prompts, include a -promptOverrideConfiguration object. For more information, see Advanced prompts. If you +expires, the subsequent InvokeAgent request begins a new session. To enable your agent +to retain conversational context across multiple sessions, include a memoryConfiguration +object. For more information, see Configure memory. To override the default prompt +behavior for agent orchestration and to use advanced prompts, include a +promptOverrideConfiguration object. For more information, see Advanced prompts. If your agent fails to be created, the response returns a list of failureReasons alongside a list -of recommendedActions for you to troubleshoot. +of recommendedActions for you to troubleshoot. The agent instructions will not be honored +if your agent has only one knowledge base, uses default prompts, has no action group, and +user input is disabled. # Arguments - `agent_name`: A name for the agent that you create. @@ -109,6 +113,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys expires and Amazon Bedrock deletes any data provided before the timeout. - `"instruction"`: Instructions that tell the agent what it should do and how it should interact with users. +- `"memoryConfiguration"`: Contains the details of the memory configured for the agent. - `"promptOverrideConfiguration"`: Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts. - `"tags"`: Any tags that you want to attach to the agent. @@ -152,11 +157,13 @@ Creates an action group for an agent. An action group represents the actions tha can carry out for the customer by defining the APIs that an agent can call and the logic for calling them. To allow your agent to request the user for additional information when trying to complete a task, add an action group with the parentActionGroupSignature field -set to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor -fields blank for this action group. During orchestration, if your agent determines that it -needs to invoke an API in an action group, but doesn't have enough information to complete -the API request, it will invoke this action group instead and return an Observation -reprompting the user for more information. +set to AMAZON.UserInput. To allow your agent to generate, run, and troubleshoot code when +trying to complete a task, add an action group with the parentActionGroupSignature field +set to AMAZON.CodeInterpreter. You must leave the description, apiSchema, and +actionGroupExecutor fields blank for this action group. 
During orchestration, if your agent +determines that it needs to invoke an API in an action group, but doesn't have enough +information to complete the API request, it will invoke this action group instead and +return an Observation reprompting the user for more information. # Arguments - `action_group_name`: The name to give the action group. @@ -183,10 +190,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"parentActionGroupSignature"`: To allow your agent to request the user for additional information when trying to complete a task, set this field to AMAZON.UserInput. You must leave the description, apiSchema, and actionGroupExecutor fields blank for this action - group. During orchestration, if your agent determines that it needs to invoke an API in an - action group, but doesn't have enough information to complete the API request, it will - invoke this action group instead and return an Observation reprompting the user for more - information. + group. To allow your agent to generate, run, and troubleshoot code when trying to complete + a task, set this field to AMAZON.CodeInterpreter. You must leave the description, + apiSchema, and actionGroupExecutor fields blank for this action group. During + orchestration, if your agent determines that it needs to invoke an API in an action group, + but doesn't have enough information to complete the API request, it will invoke this action + group instead and return an Observation reprompting the user for more information. """ function create_agent_action_group( actionGroupName, @@ -288,11 +297,11 @@ end create_data_source(data_source_configuration, knowledge_base_id, name) create_data_source(data_source_configuration, knowledge_base_id, name, params::Dict{String,<:Any}) -Sets up a data source to be added to a knowledge base. You can't change the -chunkingConfiguration after you create the data source. +Creates a data source connector for a knowledge base. You can't change the +chunkingConfiguration after you create the data source connector. # Arguments -- `data_source_configuration`: Contains metadata about where the data source is stored. +- `data_source_configuration`: The connection configuration for the data source. - `knowledge_base_id`: The unique identifier of the knowledge base to which to add the data source. - `name`: The name of the data source. @@ -303,7 +312,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys completes no more than one time. If this token matches a previous request, Amazon Bedrock ignores the request, but does not return an error. For more information, see Ensuring idempotency. -- `"dataDeletionPolicy"`: The data deletion policy assigned to the data source. +- `"dataDeletionPolicy"`: The data deletion policy for the data source. You can set the + data deletion policy to: DELETE: Deletes all data from your data source that’s + converted into vector embeddings upon deletion of a knowledge base or data source resource. + Note that the vector store itself is not deleted, only the data. This flag is ignored if an + Amazon Web Services account is deleted. RETAIN: Retains all data from your data source + that’s converted into vector embeddings upon deletion of a knowledge base or data source + resource. Note that the vector store itself is not deleted if you delete a knowledge base + or data source resource. - `"description"`: A description of the data source. 
- `"serverSideEncryptionConfiguration"`: Contains details about the server-side encryption for the data source. @@ -354,6 +370,185 @@ function create_data_source( ) end +""" + create_flow(execution_role_arn, name) + create_flow(execution_role_arn, name, params::Dict{String,<:Any}) + +Creates a prompt flow that you can use to send an input through various steps to yield an +output. Configure nodes, each of which corresponds to a step of the flow, and create +connections between the nodes to create paths to different outputs. For more information, +see How it works and Create a flow in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `execution_role_arn`: The Amazon Resource Name (ARN) of the service role with permissions + to create and manage a flow. For more information, see Create a service role for flows in + Amazon Bedrock in the Amazon Bedrock User Guide. +- `name`: A name for the flow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the flow. +- `"definition"`: A definition of the nodes and connections between nodes in the flow. +- `"description"`: A description for the flow. +- `"tags"`: Any tags that you want to attach to the flow. For more information, see Tagging + resources in Amazon Bedrock. +""" +function create_flow( + executionRoleArn, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/flows/", + Dict{String,Any}( + "executionRoleArn" => executionRoleArn, + "name" => name, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_flow( + executionRoleArn, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "executionRoleArn" => executionRoleArn, + "name" => name, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_flow_alias(flow_identifier, name, routing_configuration) + create_flow_alias(flow_identifier, name, routing_configuration, params::Dict{String,<:Any}) + +Creates an alias of a flow for deployment. For more information, see Deploy a flow in +Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow for which to create an alias. +- `name`: A name for the alias. +- `routing_configuration`: Contains information about the version to which to map the alias. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description for the alias. +- `"tags"`: Any tags that you want to attach to the alias of the flow. 
For more + information, see Tagging resources in Amazon Bedrock. +""" +function create_flow_alias( + flowIdentifier, + name, + routingConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/aliases", + Dict{String,Any}( + "name" => name, + "routingConfiguration" => routingConfiguration, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_flow_alias( + flowIdentifier, + name, + routingConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/aliases", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, + "routingConfiguration" => routingConfiguration, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_flow_version(flow_identifier) + create_flow_version(flow_identifier, params::Dict{String,<:Any}) + +Creates a version of the flow that you can deploy. For more information, see Deploy a flow +in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `flow_identifier`: The unique identifier of the flow that you want to create a version of. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description of the version of the flow. +""" +function create_flow_version( + flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/versions", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_flow_version( + flowIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/flows/$(flowIdentifier)/versions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_knowledge_base(knowledge_base_configuration, name, role_arn, storage_configuration) create_knowledge_base(knowledge_base_configuration, name, role_arn, storage_configuration, params::Dict{String,<:Any}) @@ -445,6 +640,108 @@ function create_knowledge_base( ) end +""" + create_prompt(name) + create_prompt(name, params::Dict{String,<:Any}) + +Creates a prompt in your prompt library that you can add to a flow. For more information, +see Prompt management in Amazon Bedrock, Create a prompt using Prompt management and Prompt +flows in Amazon Bedrock in the Amazon Bedrock User Guide. + +# Arguments +- `name`: A name for the prompt. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. 
+- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the prompt. +- `"defaultVariant"`: The name of the default variant for the prompt. This value must match + the name field in the relevant PromptVariant object. +- `"description"`: A description for the prompt. +- `"tags"`: Any tags that you want to attach to the prompt. For more information, see + Tagging resources in Amazon Bedrock. +- `"variants"`: A list of objects, each containing details about a variant of the prompt. +""" +function create_prompt(name; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "POST", + "/prompts/", + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_prompt( + name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/prompts/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_prompt_version(prompt_identifier) + create_prompt_version(prompt_identifier, params::Dict{String,<:Any}) + +Creates a static snapshot of your prompt that can be deployed to production. For more +information, see Deploy prompts using Prompt management by creating versions in the Amazon +Bedrock User Guide. + +# Arguments +- `prompt_identifier`: The unique identifier of the prompt that you want to create a + version of. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier to ensure that the API request + completes no more than one time. If this token matches a previous request, Amazon Bedrock + ignores the request, but does not return an error. For more information, see Ensuring + idempotency. +- `"description"`: A description for the version of the prompt. +- `"tags"`: Any tags that you want to attach to the version of the prompt. For more + information, see Tagging resources in Amazon Bedrock. +""" +function create_prompt_version( + promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "POST", + "/prompts/$(promptIdentifier)/versions", + Dict{String,Any}("clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_prompt_version( + promptIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "POST", + "/prompts/$(promptIdentifier)/versions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("clientToken" => string(uuid4())), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_agent(agent_id) delete_agent(agent_id, params::Dict{String,<:Any}) @@ -637,6 +934,120 @@ function delete_data_source( ) end +""" + delete_flow(flow_identifier) + delete_flow(flow_identifier, params::Dict{String,<:Any}) + +Deletes a flow. + +# Arguments +- `flow_identifier`: The unique identifier of the flow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"skipResourceInUseCheck"`: By default, this value is false and deletion is stopped if + the resource is in use. If you set it to true, the resource will be deleted even if the + resource is in use. 
+"""
+function delete_flow(flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config())
+    return bedrock_agent(
+        "DELETE",
+        "/flows/$(flowIdentifier)/";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_flow(
+    flowIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "DELETE",
+        "/flows/$(flowIdentifier)/",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_flow_alias(alias_identifier, flow_identifier)
+    delete_flow_alias(alias_identifier, flow_identifier, params::Dict{String,<:Any})
+
+Deletes an alias of a flow.
+
+# Arguments
+- `alias_identifier`: The unique identifier of the alias to be deleted.
+- `flow_identifier`: The unique identifier of the flow that the alias belongs to.
+
+"""
+function delete_flow_alias(
+    aliasIdentifier, flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock_agent(
+        "DELETE",
+        "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_flow_alias(
+    aliasIdentifier,
+    flowIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "DELETE",
+        "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_flow_version(flow_identifier, flow_version)
+    delete_flow_version(flow_identifier, flow_version, params::Dict{String,<:Any})
+
+Deletes a version of a flow.
+
+# Arguments
+- `flow_identifier`: The unique identifier of the flow whose version you want to delete.
+- `flow_version`: The version of the flow that you want to delete.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"skipResourceInUseCheck"`: By default, this value is false and deletion is stopped if
+  the resource is in use. If you set it to true, the resource will be deleted even if the
+  resource is in use.
+"""
+function delete_flow_version(
+    flowIdentifier, flowVersion; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock_agent(
+        "DELETE",
+        "/flows/$(flowIdentifier)/versions/$(flowVersion)/";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_flow_version(
+    flowIdentifier,
+    flowVersion,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "DELETE",
+        "/flows/$(flowIdentifier)/versions/$(flowVersion)/",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     delete_knowledge_base(knowledge_base_id)
     delete_knowledge_base(knowledge_base_id, params::Dict{String,<:Any})
@@ -673,6 +1084,45 @@ function delete_knowledge_base(
     )
 end
 
+"""
+    delete_prompt(prompt_identifier)
+    delete_prompt(prompt_identifier, params::Dict{String,<:Any})
+
+Deletes a prompt or a version of it, depending on whether you include the promptVersion
+field or not. For more information, see Delete prompts from the Prompt management tool and
+Delete a version of a prompt from the Prompt management tool in the Amazon Bedrock User
+Guide.
+
+# Arguments
+- `prompt_identifier`: The unique identifier of the prompt.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"promptVersion"`: The version of the prompt to delete.
To delete the prompt, omit this
+  field.
+"""
+function delete_prompt(promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config())
+    return bedrock_agent(
+        "DELETE",
+        "/prompts/$(promptIdentifier)/";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function delete_prompt(
+    promptIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "DELETE",
+        "/prompts/$(promptIdentifier)/",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     disassociate_agent_knowledge_base(agent_id, agent_version, knowledge_base_id)
     disassociate_agent_knowledge_base(agent_id, agent_version, knowledge_base_id, params::Dict{String,<:Any})
@@ -926,7 +1376,114 @@ function get_data_source(
 )
     return bedrock_agent(
         "GET",
-        "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)",
+        "/knowledgebases/$(knowledgeBaseId)/datasources/$(dataSourceId)",
         params;
         aws_config=aws_config,
         feature_set=SERVICE_FEATURE_SET,
     )
 end
 
+"""
+    get_flow(flow_identifier)
+    get_flow(flow_identifier, params::Dict{String,<:Any})
+
+Retrieves information about a flow. For more information, see Manage a flow in Amazon
+Bedrock in the Amazon Bedrock User Guide.
+
+# Arguments
+- `flow_identifier`: The unique identifier of the flow.
+
+"""
+function get_flow(flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config())
+    return bedrock_agent(
+        "GET",
+        "/flows/$(flowIdentifier)/";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_flow(
+    flowIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "GET",
+        "/flows/$(flowIdentifier)/",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_flow_alias(alias_identifier, flow_identifier)
+    get_flow_alias(alias_identifier, flow_identifier, params::Dict{String,<:Any})
+
+Retrieves information about an alias of a flow. For more information, see Deploy a flow in
+Amazon Bedrock in the Amazon Bedrock User Guide.
+
+# Arguments
+- `alias_identifier`: The unique identifier of the alias for which to retrieve information.
+- `flow_identifier`: The unique identifier of the flow that the alias belongs to.
+
+"""
+function get_flow_alias(
+    aliasIdentifier, flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock_agent(
+        "GET",
+        "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_flow_alias(
+    aliasIdentifier,
+    flowIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "GET",
+        "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_flow_version(flow_identifier, flow_version)
+    get_flow_version(flow_identifier, flow_version, params::Dict{String,<:Any})
+
+Retrieves information about a version of a flow. For more information, see Deploy a flow in
+Amazon Bedrock in the Amazon Bedrock User Guide.
+
+# Arguments
+- `flow_identifier`: The unique identifier of the flow for which to get information.
+- `flow_version`: The version of the flow for which to get information.
+ +""" +function get_flow_version( + flowIdentifier, flowVersion; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/versions/$(flowVersion)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_flow_version( + flowIdentifier, + flowVersion, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/flows/$(flowIdentifier)/versions/$(flowVersion)/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1010,6 +1567,45 @@ function get_knowledge_base( ) end +""" + get_prompt(prompt_identifier) + get_prompt(prompt_identifier, params::Dict{String,<:Any}) + +Retrieves information about the working draft (DRAFT version) of a prompt or a version of +it, depending on whether you include the promptVersion field or not. For more information, +see View information about prompts using Prompt management and View information about a +version of your prompt in the Amazon Bedrock User Guide. + +# Arguments +- `prompt_identifier`: The unique identifier of the prompt. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"promptVersion"`: The version of the prompt about which you want to retrieve + information. Omit this field to return information about the working draft of the prompt. +""" +function get_prompt(promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return bedrock_agent( + "GET", + "/prompts/$(promptIdentifier)/"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_prompt( + promptIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "GET", + "/prompts/$(promptIdentifier)/", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_agent_action_groups(agent_id, agent_version) list_agent_action_groups(agent_id, agent_version, params::Dict{String,<:Any}) @@ -1247,6 +1843,120 @@ function list_data_sources( ) end +""" + list_flow_aliases(flow_identifier) + list_flow_aliases(flow_identifier, params::Dict{String,<:Any}) + +Returns a list of aliases for a flow. + +# Arguments +- `flow_identifier`: The unique identifier of the flow for which aliases are being returned. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return in the response. If the total + number of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxResults value + provided in the request, enter the token returned in the nextToken field in the response in + this field to return the next batch of results. 
+"""
+function list_flow_aliases(
+    flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock_agent(
+        "GET",
+        "/flows/$(flowIdentifier)/aliases";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function list_flow_aliases(
+    flowIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "GET",
+        "/flows/$(flowIdentifier)/aliases",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    list_flow_versions(flow_identifier)
+    list_flow_versions(flow_identifier, params::Dict{String,<:Any})
+
+Returns a list of information about each version of a flow. For more information, see
+Deploy a flow in Amazon Bedrock in the Amazon Bedrock User Guide.
+
+# Arguments
+- `flow_identifier`: The unique identifier of the flow.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of results to return in the response. If the total
+  number of results is greater than this value, use the token returned in the response in the
+  nextToken field when making another request to return the next batch of results.
+- `"nextToken"`: If the total number of results is greater than the maxResults value
+  provided in the request, enter the token returned in the nextToken field in the response in
+  this field to return the next batch of results.
+"""
+function list_flow_versions(
+    flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock_agent(
+        "GET",
+        "/flows/$(flowIdentifier)/versions";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function list_flow_versions(
+    flowIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "GET",
+        "/flows/$(flowIdentifier)/versions",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    list_flows()
+    list_flows(params::Dict{String,<:Any})
+
+Returns a list of flows and information about each flow. For more information, see Manage a
+flow in Amazon Bedrock in the Amazon Bedrock User Guide.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of results to return in the response. If the total
+  number of results is greater than this value, use the token returned in the response in the
+  nextToken field when making another request to return the next batch of results.
+- `"nextToken"`: If the total number of results is greater than the maxResults value
+  provided in the request, enter the token returned in the nextToken field in the response in
+  this field to return the next batch of results.
+"""
+function list_flows(; aws_config::AbstractAWSConfig=global_aws_config())
+    return bedrock_agent(
+        "GET", "/flows/"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function list_flows(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock_agent(
+        "GET", "/flows/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+
 """
     list_ingestion_jobs(data_source_id, knowledge_base_id)
     list_ingestion_jobs(data_source_id, knowledge_base_id, params::Dict{String,<:Any})
@@ -1327,6 +2037,39 @@ function list_knowledge_bases(
     )
 end
 
+"""
+    list_prompts()
+    list_prompts(params::Dict{String,<:Any})
+
+Returns either information about the working draft (DRAFT version) of each prompt in an
+account, or information about all versions of a prompt, depending on whether you include
+the promptIdentifier field or not. For more information, see View information about prompts
+using Prompt management in the Amazon Bedrock User Guide.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of results to return in the response. If the total
+  number of results is greater than this value, use the token returned in the response in the
+  nextToken field when making another request to return the next batch of results.
+- `"nextToken"`: If the total number of results is greater than the maxResults value
+  provided in the request, enter the token returned in the nextToken field in the response in
+  this field to return the next batch of results.
+- `"promptIdentifier"`: The unique identifier of the prompt for whose versions you want to
+  return information. Omit this field to list information about all prompts in an account.
+"""
+function list_prompts(; aws_config::AbstractAWSConfig=global_aws_config())
+    return bedrock_agent(
+        "GET", "/prompts/"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function list_prompts(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return bedrock_agent(
+        "GET", "/prompts/", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+
 """
     list_tags_for_resource(resource_arn)
     list_tags_for_resource(resource_arn, params::Dict{String,<:Any})
@@ -1391,6 +2134,39 @@ function prepare_agent(
     )
 end
 
+"""
+    prepare_flow(flow_identifier)
+    prepare_flow(flow_identifier, params::Dict{String,<:Any})
+
+Prepares the DRAFT version of a flow so that it can be invoked. For more information, see
+Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide.
+
+# Arguments
+- `flow_identifier`: The unique identifier of the flow.
+
+"""
+function prepare_flow(flowIdentifier; aws_config::AbstractAWSConfig=global_aws_config())
+    return bedrock_agent(
+        "POST",
+        "/flows/$(flowIdentifier)/";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function prepare_flow(
+    flowIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return bedrock_agent(
+        "POST",
+        "/flows/$(flowIdentifier)/",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     start_ingestion_job(data_source_id, knowledge_base_id)
     start_ingestion_job(data_source_id, knowledge_base_id, params::Dict{String,<:Any})
@@ -1539,6 +2315,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   expires and Amazon Bedrock deletes any data provided before the timeout.
- `"instruction"`: Specifies new instructions that tell the agent what it should do and how it should interact with users. +- `"memoryConfiguration"`: Specifies the new memory configuration for the agent. - `"promptOverrideConfiguration"`: Contains configurations to override prompts in different parts of an agent sequence. For more information, see Advanced prompts. """ @@ -1756,20 +2533,21 @@ end update_data_source(data_source_configuration, data_source_id, knowledge_base_id, name) update_data_source(data_source_configuration, data_source_id, knowledge_base_id, name, params::Dict{String,<:Any}) -Updates configurations for a data source. You can't change the chunkingConfiguration after -you create the data source. Specify the existing chunkingConfiguration. +Updates the configurations for a data source connector. You can't change the +chunkingConfiguration after you create the data source connector. Specify the existing +chunkingConfiguration. # Arguments -- `data_source_configuration`: Contains details about the storage configuration of the data - source. +- `data_source_configuration`: The connection configuration for the data source that you + want to update. - `data_source_id`: The unique identifier of the data source. -- `knowledge_base_id`: The unique identifier of the knowledge base to which the data source - belongs. +- `knowledge_base_id`: The unique identifier of the knowledge base for the data source. - `name`: Specifies a new name for the data source. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"dataDeletionPolicy"`: The data deletion policy of the updated data source. +- `"dataDeletionPolicy"`: The data deletion policy for the data source that you want to + update. - `"description"`: Specifies a new description for the data source. - `"serverSideEncryptionConfiguration"`: Contains details about server-side encryption of the data source. @@ -1818,6 +2596,123 @@ function update_data_source( ) end +""" + update_flow(execution_role_arn, flow_identifier, name) + update_flow(execution_role_arn, flow_identifier, name, params::Dict{String,<:Any}) + +Modifies a flow. Include both fields that you want to keep and fields that you want to +change. For more information, see How it works and Create a flow in Amazon Bedrock in the +Amazon Bedrock User Guide. + +# Arguments +- `execution_role_arn`: The Amazon Resource Name (ARN) of the service role with permissions + to create and manage a flow. For more information, see Create a service role for flows in + Amazon Bedrock in the Amazon Bedrock User Guide. +- `flow_identifier`: The unique identifier of the flow. +- `name`: A name for the flow. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the flow. +- `"definition"`: A definition of the nodes and the connections between the nodes in the + flow. +- `"description"`: A description for the flow. 
+""" +function update_flow( + executionRoleArn, + flowIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/", + Dict{String,Any}("executionRoleArn" => executionRoleArn, "name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_flow( + executionRoleArn, + flowIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("executionRoleArn" => executionRoleArn, "name" => name), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_flow_alias(alias_identifier, flow_identifier, name, routing_configuration) + update_flow_alias(alias_identifier, flow_identifier, name, routing_configuration, params::Dict{String,<:Any}) + +Modifies the alias of a flow. Include both fields that you want to keep and ones that you +want to change. For more information, see Deploy a flow in Amazon Bedrock in the Amazon +Bedrock User Guide. + +# Arguments +- `alias_identifier`: The unique identifier of the alias. +- `flow_identifier`: The unique identifier of the flow. +- `name`: The name of the alias. +- `routing_configuration`: Contains information about the version to which to map the alias. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description for the alias. +""" +function update_flow_alias( + aliasIdentifier, + flowIdentifier, + name, + routingConfiguration; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)", + Dict{String,Any}("name" => name, "routingConfiguration" => routingConfiguration); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_flow_alias( + aliasIdentifier, + flowIdentifier, + name, + routingConfiguration, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/flows/$(flowIdentifier)/aliases/$(aliasIdentifier)", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, "routingConfiguration" => routingConfiguration + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_knowledge_base(knowledge_base_configuration, knowledge_base_id, name, role_arn, storage_configuration) update_knowledge_base(knowledge_base_configuration, knowledge_base_id, name, role_arn, storage_configuration, params::Dict{String,<:Any}) @@ -1893,3 +2788,50 @@ function update_knowledge_base( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_prompt(name, prompt_identifier) + update_prompt(name, prompt_identifier, params::Dict{String,<:Any}) + +Modifies a prompt in your prompt library. Include both fields that you want to keep and +fields that you want to replace. For more information, see Prompt management in Amazon +Bedrock and Edit prompts in your prompt library in the Amazon Bedrock User Guide. + +# Arguments +- `name`: A name for the prompt. +- `prompt_identifier`: The unique identifier of the prompt. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"customerEncryptionKeyArn"`: The Amazon Resource Name (ARN) of the KMS key to encrypt + the prompt. 
+- `"defaultVariant"`: The name of the default variant for the prompt. This value must match + the name field in the relevant PromptVariant object. +- `"description"`: A description for the prompt. +- `"variants"`: A list of objects, each containing details about a variant of the prompt. +""" +function update_prompt( + name, promptIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent( + "PUT", + "/prompts/$(promptIdentifier)/", + Dict{String,Any}("name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_prompt( + name, + promptIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent( + "PUT", + "/prompts/$(promptIdentifier)/", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/bedrock_agent_runtime.jl b/src/services/bedrock_agent_runtime.jl index 1468f14ec2..6fe659e2ee 100644 --- a/src/services/bedrock_agent_runtime.jl +++ b/src/services/bedrock_agent_runtime.jl @@ -4,24 +4,123 @@ using AWS.AWSServices: bedrock_agent_runtime using AWS.Compat using AWS.UUIDs +""" + delete_agent_memory(agent_alias_id, agent_id) + delete_agent_memory(agent_alias_id, agent_id, params::Dict{String,<:Any}) + +Deletes memory from the specified memory identifier. + +# Arguments +- `agent_alias_id`: The unique identifier of an alias of an agent. +- `agent_id`: The unique identifier of the agent to which the alias belongs. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"memoryId"`: The unique identifier of the memory. +""" +function delete_agent_memory( + agentAliasId, agentId; aws_config::AbstractAWSConfig=global_aws_config() +) + return bedrock_agent_runtime( + "DELETE", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_agent_memory( + agentAliasId, + agentId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "DELETE", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_agent_memory(agent_alias_id, agent_id, memory_id, memory_type) + get_agent_memory(agent_alias_id, agent_id, memory_id, memory_type, params::Dict{String,<:Any}) + +Gets the sessions stored in the memory of the agent. + +# Arguments +- `agent_alias_id`: The unique identifier of an alias of an agent. +- `agent_id`: The unique identifier of the agent to which the alias belongs. +- `memory_id`: The unique identifier of the memory. +- `memory_type`: The type of memory. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxItems"`: The maximum number of items to return in the response. If the total number + of results is greater than this value, use the token returned in the response in the + nextToken field when making another request to return the next batch of results. +- `"nextToken"`: If the total number of results is greater than the maxItems value provided + in the request, enter the token returned in the nextToken field in the response in this + field to return the next batch of results. 
+""" +function get_agent_memory( + agentAliasId, + agentId, + memoryId, + memoryType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "GET", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories", + Dict{String,Any}("memoryId" => memoryId, "memoryType" => memoryType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_agent_memory( + agentAliasId, + agentId, + memoryId, + memoryType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "GET", + "/agents/$(agentId)/agentAliases/$(agentAliasId)/memories", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("memoryId" => memoryId, "memoryType" => memoryType), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ invoke_agent(agent_alias_id, agent_id, session_id) invoke_agent(agent_alias_id, agent_id, session_id, params::Dict{String,<:Any}) - The CLI doesn't support InvokeAgent. Sends a prompt for the agent to process and respond -to. Note the following fields for the request: To continue the same conversation with an -agent, use the same sessionId value in the request. To activate trace enablement, turn -enableTrace to true. Trace enablement helps you follow the agent's reasoning process that -led it to the information it processed, the actions it took, and the final result it -yielded. For more information, see Trace enablement. End a conversation by setting -endSession to true. In the sessionState object, you can include attributes for the -session or prompt or, if you configured an action group to return control, results from -invocation of the action group. The response is returned in the bytes field of the chunk -object. The attribution object contains citations for parts of the response. If you set -enableTrace to true in the request, you can trace the agent's steps and reasoning process -that led it to the response. If the action predicted was configured to return control, -the response returns parameters for the action, elicited from the user, in the -returnControl field. Errors are also surfaced in the response. + The CLI doesn't support streaming operations in Amazon Bedrock, including InvokeAgent. +Sends a prompt for the agent to process and respond to. Note the following fields for the +request: To continue the same conversation with an agent, use the same sessionId value in +the request. To activate trace enablement, turn enableTrace to true. Trace enablement +helps you follow the agent's reasoning process that led it to the information it processed, +the actions it took, and the final result it yielded. For more information, see Trace +enablement. End a conversation by setting endSession to true. In the sessionState +object, you can include attributes for the session or prompt or, if you configured an +action group to return control, results from invocation of the action group. The response +is returned in the bytes field of the chunk object. The attribution object contains +citations for parts of the response. If you set enableTrace to true in the request, you +can trace the agent's steps and reasoning process that led it to the response. If the +action predicted was configured to return control, the response returns parameters for the +action, elicited from the user, in the returnControl field. Errors are also surfaced in +the response. # Arguments - `agent_alias_id`: The alias of the agent to use. 
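The memory operations introduced above pair naturally: `get_agent_memory` pages through the
session summaries stored for a memory, and `delete_agent_memory` clears them. A minimal
sketch using AWS.jl's high-level `@service` interface follows; every identifier below is a
placeholder, and `"SESSION_SUMMARY"` is assumed (not confirmed by this diff) to be the
accepted `memoryType` value.

```julia
using AWS: @service
@service Bedrock_Agent_Runtime

# Placeholder identifiers for illustration only.
agent_id = "AGENT1234"
agent_alias_id = "ALIAS1234"
memory_id = "MEM1234"

# Page through stored session summaries; maxItems mirrors the optional
# parameter documented above.
resp = Bedrock_Agent_Runtime.get_agent_memory(
    agent_alias_id, agent_id, memory_id, "SESSION_SUMMARY", Dict("maxItems" => 10)
)

# Clear the memory once the conversational context is no longer needed.
Bedrock_Agent_Runtime.delete_agent_memory(
    agent_alias_id, agent_id, Dict("memoryId" => memory_id)
)
```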
@@ -37,6 +136,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"inputText"`: The prompt text to send the agent. If you include returnControlInvocationResults in the sessionState field, the inputText field will be ignored. +- `"memoryId"`: The unique identifier of the agent memory. - `"sessionState"`: Contains parameters that specify various attributes of the session. For more information, see Control session context. If you include returnControlInvocationResults in the sessionState field, the inputText field will be @@ -68,6 +168,51 @@ function invoke_agent( ) end +""" + invoke_flow(flow_alias_identifier, flow_identifier, inputs) + invoke_flow(flow_alias_identifier, flow_identifier, inputs, params::Dict{String,<:Any}) + +Invokes an alias of a flow to run the inputs that you specify and return the output of each +node as a stream. If there's an error, the error is returned. For more information, see +Test a flow in Amazon Bedrock in the Amazon Bedrock User Guide. The CLI doesn't support +streaming operations in Amazon Bedrock, including InvokeFlow. + +# Arguments +- `flow_alias_identifier`: The unique identifier of the flow alias. +- `flow_identifier`: The unique identifier of the flow. +- `inputs`: A list of objects, each containing information about an input into the flow. + +""" +function invoke_flow( + flowAliasIdentifier, + flowIdentifier, + inputs; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "POST", + "/flows/$(flowIdentifier)/aliases/$(flowAliasIdentifier)", + Dict{String,Any}("inputs" => inputs); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function invoke_flow( + flowAliasIdentifier, + flowIdentifier, + inputs, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_agent_runtime( + "POST", + "/flows/$(flowIdentifier)/aliases/$(flowAliasIdentifier)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("inputs" => inputs), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ retrieve(knowledge_base_id, retrieval_query) retrieve(knowledge_base_id, retrieval_query, params::Dict{String,<:Any}) @@ -129,8 +274,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"retrieveAndGenerateConfiguration"`: Contains configurations for the knowledge base query and retrieval process. For more information, see Query configurations. - `"sessionConfiguration"`: Contains details about the session with the knowledge base. -- `"sessionId"`: The unique identifier of the session. Reuse the same value to continue the - same session with the knowledge base. +- `"sessionId"`: The unique identifier of the session. When you first make a + RetrieveAndGenerate request, Amazon Bedrock automatically generates this value. You must + reuse this value for all subsequent requests in the same conversational session. This value + allows Amazon Bedrock to maintain context and knowledge from previous interactions. You + can't explicitly set the sessionId yourself. 
""" function retrieve_and_generate(input; aws_config::AbstractAWSConfig=global_aws_config()) return bedrock_agent_runtime( diff --git a/src/services/bedrock_runtime.jl b/src/services/bedrock_runtime.jl index 3f4cc4d7c6..69f3b1592c 100644 --- a/src/services/bedrock_runtime.jl +++ b/src/services/bedrock_runtime.jl @@ -4,31 +4,85 @@ using AWS.AWSServices: bedrock_runtime using AWS.Compat using AWS.UUIDs +""" + apply_guardrail(content, guardrail_identifier, guardrail_version, source) + apply_guardrail(content, guardrail_identifier, guardrail_version, source, params::Dict{String,<:Any}) + +The action to apply a guardrail. + +# Arguments +- `content`: The content details used in the request to apply the guardrail. +- `guardrail_identifier`: The guardrail identifier used in the request to apply the + guardrail. +- `guardrail_version`: The guardrail version used in the request to apply the guardrail. +- `source`: The source of data used in the request to apply the guardrail. + +""" +function apply_guardrail( + content, + guardrailIdentifier, + guardrailVersion, + source; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_runtime( + "POST", + "/guardrail/$(guardrailIdentifier)/version/$(guardrailVersion)/apply", + Dict{String,Any}("content" => content, "source" => source); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function apply_guardrail( + content, + guardrailIdentifier, + guardrailVersion, + source, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return bedrock_runtime( + "POST", + "/guardrail/$(guardrailIdentifier)/version/$(guardrailVersion)/apply", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("content" => content, "source" => source), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ converse(messages, model_id) converse(messages, model_id, params::Dict{String,<:Any}) Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code -once and use it with different models. Should a model have unique inference parameters, you -can also pass those unique parameters to the model. For information about the Converse API, -see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use a -guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a -model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code, -see Converse API examples in the Amazon Bedrock User Guide. This operation requires -permission for the bedrock:InvokeModel action. +once and use it with different models. If a model has unique inference parameters, you can +also pass those unique parameters to the model. Amazon Bedrock doesn't store any text, +images, or documents that you provide as content. The data is only used to generate the +response. For information about the Converse API, see Use the Converse API in the Amazon +Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in the +Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling) in +the Amazon Bedrock User Guide For example code, see Converse API examples in the Amazon +Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action. # Arguments - `messages`: The messages that you want to send to the model. 
- `model_id`: The identifier for the model that you want to call. The modelId to provide
-  depends on the type of model that you use: If you use a base model, specify the model ID
-  or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs
-  (on-demand throughput) in the Amazon Bedrock User Guide. If you use a provisioned model,
-  specify the ARN of the Provisioned Throughput. For more information, see Run inference
-  using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom
-  model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting
-  provisioned model. For more information, see Use a custom model in Amazon Bedrock in the
-  Amazon Bedrock User Guide.
+  depends on the type of model or throughput that you use: If you use a base model, specify
+  the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base
+  model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use an
+  inference profile, specify the inference profile ID or its ARN. For a list of inference
+  profile IDs, see Supported Regions and models for cross-region inference in the Amazon
+  Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned
+  Throughput. For more information, see Run inference using a Provisioned Throughput in the
+  Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned
+  Throughput for it. Then specify the ARN of the resulting provisioned model. For more
+  information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
+  The Converse API doesn't support imported models.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -88,24 +142,29 @@ ConverseStream provides a consistent API that works with all Amazon Bedrock mode
 support messages. This allows you to write code once and use it with different models.
 Should a model have unique inference parameters, you can also pass those unique parameters
 to the model. To find out if a model supports streaming, call GetFoundationModel and check
-the responseStreamingSupported field in the response. For information about the Converse
-API, see Use the Converse API in the Amazon Bedrock User Guide. To use a guardrail, see Use
-a guardrail with the Converse API in the Amazon Bedrock User Guide. To use a tool with a
-model, see Tool use (Function calling) in the Amazon Bedrock User Guide For example code,
-see Conversation streaming example in the Amazon Bedrock User Guide. This operation
-requires permission for the bedrock:InvokeModelWithResponseStream action.
+the responseStreamingSupported field in the response. The CLI doesn't support streaming
+operations in Amazon Bedrock, including ConverseStream. Amazon Bedrock doesn't store any
+text, images, or documents that you provide as content. The data is only used to generate
+the response. For information about the Converse API, see Use the Converse API in the
+Amazon Bedrock User Guide. To use a guardrail, see Use a guardrail with the Converse API in
+the Amazon Bedrock User Guide. To use a tool with a model, see Tool use (Function calling)
+in the Amazon Bedrock User Guide. For example code, see Conversation streaming example in
+the Amazon Bedrock User Guide. This operation requires permission for the
+bedrock:InvokeModelWithResponseStream action.
 
 # Arguments
 - `messages`: The messages that you want to send to the model.
-- `model_id`: The ID for the model. The modelId to provide depends on the type of model - that you use: If you use a base model, specify the model ID or its ARN. For a list of - model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the - Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the - Provisioned Throughput. For more information, see Run inference using a Provisioned - Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase - Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For - more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User - Guide. +- `model_id`: The ID for the model. The modelId to provide depends on the type of model or + throughput that you use: If you use a base model, specify the model ID or its ARN. For a + list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) + in the Amazon Bedrock User Guide. If you use an inference profile, specify the inference + profile ID or its ARN. For a list of inference profile IDs, see Supported Regions and + models for cross-region inference in the Amazon Bedrock User Guide. If you use a + provisioned model, specify the ARN of the Provisioned Throughput. For more information, see + Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use + a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the + resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock + in the Amazon Bedrock User Guide. The Converse API doesn't support imported models. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -179,7 +238,9 @@ Guide. This operation requires permission for the bedrock:InvokeModel action. inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock - in the Amazon Bedrock User Guide. + in the Amazon Bedrock User Guide. If you use an imported model, specify the ARN of the + imported model. You can get the model ARN from a successful call to CreateModelImportJob or + from the Imported models page in the Amazon Bedrock console. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -229,9 +290,10 @@ end Invoke the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. The response is returned in a stream. To see if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported -field in the response. The CLI doesn't support InvokeModelWithResponseStream. For example -code, see Invoke model with streaming code example in the Amazon Bedrock User Guide. This -operation requires permissions to perform the bedrock:InvokeModelWithResponseStream action. +field in the response. The CLI doesn't support streaming operations in Amazon Bedrock, +including InvokeModelWithResponseStream. For example code, see Invoke model with streaming +code example in the Amazon Bedrock User Guide. This operation requires permissions to +perform the bedrock:InvokeModelWithResponseStream action. 
# Arguments - `body`: The prompt and inference parameters in the format specified in the contentType in @@ -246,7 +308,9 @@ operation requires permissions to perform the bedrock:InvokeModelWithResponseStr inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock - in the Amazon Bedrock User Guide. + in the Amazon Bedrock User Guide. If you use an imported model, specify the ARN of the + imported model. You can get the model ARN from a successful call to CreateModelImportJob or + from the Imported models page in the Amazon Bedrock console. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/chatbot.jl b/src/services/chatbot.jl index 40054a1731..80ecb52be4 100644 --- a/src/services/chatbot.jl +++ b/src/services/chatbot.jl @@ -8,22 +8,25 @@ using AWS.UUIDs create_chime_webhook_configuration(configuration_name, iam_role_arn, sns_topic_arns, webhook_description, webhook_url) create_chime_webhook_configuration(configuration_name, iam_role_arn, sns_topic_arns, webhook_description, webhook_url, params::Dict{String,<:Any}) -Creates Chime Webhook Configuration +Creates an AWS Chatbot configuration for Amazon Chime. # Arguments - `configuration_name`: The name of the configuration. -- `iam_role_arn`: This is a user-defined role that AWS Chatbot will assume. This is not the - service-linked role. For more information, see IAM Policies for AWS Chatbot. -- `sns_topic_arns`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot. -- `webhook_description`: Description of the webhook. Recommend using the convention - `RoomName/WebhookName`. See Chime setup tutorial for more details: - https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html. -- `webhook_url`: URL for the Chime webhook. +- `iam_role_arn`: A user-defined role that AWS Chatbot assumes. This is not the + service-linked role. For more information, see IAM policies for AWS Chatbot in the AWS + Chatbot Administrator Guide. +- `sns_topic_arns`: The Amazon Resource Names (ARNs) of the SNS topics that deliver + notifications to AWS Chatbot. +- `webhook_description`: A description of the webhook. We recommend using the convention + RoomName/WebhookName. For more information, see Tutorial: Get started with Amazon Chime in + the AWS Chatbot Administrator Guide. +- `webhook_url`: The URL for the Amazon Chime webhook. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE. -- `"Tags"`: A list of tags to apply to the configuration. +- `"Tags"`: A map of tags assigned to a resource. A tag is a string-to-string map of + key-value pairs. """ function create_chime_webhook_configuration( ConfigurationName, @@ -81,29 +84,31 @@ end create_microsoft_teams_channel_configuration(channel_id, configuration_name, iam_role_arn, team_id, tenant_id) create_microsoft_teams_channel_configuration(channel_id, configuration_name, iam_role_arn, team_id, tenant_id, params::Dict{String,<:Any}) -Creates MS Teams Channel Configuration +Creates an AWS Chatbot configuration for Microsoft Teams. # Arguments - `channel_id`: The ID of the Microsoft Teams channel. - `configuration_name`: The name of the configuration. 
-- `iam_role_arn`: The ARN of the IAM role that defines the permissions for AWS Chatbot.
-  This is a user-defined role that AWS Chatbot will assume. This is not the service-linked
-  role. For more information, see IAM Policies for AWS Chatbot.
-- `team_id`: The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID,
-  you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot
-  console. Then you can copy and paste the team ID from the console. For more details, see
-  steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide.
+- `iam_role_arn`: A user-defined role that AWS Chatbot assumes. This is not the
+  service-linked role. For more information, see IAM policies for AWS Chatbot in the AWS
+  Chatbot Administrator Guide.
+- `team_id`: The ID of the Microsoft Teams team authorized with AWS Chatbot. To get the
+  team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS
+  Chatbot console. Then you can copy and paste the team ID from the console. For more
+  information, see Step 1: Configure a Microsoft Teams client in the AWS Chatbot
+  Administrator Guide.
 - `tenant_id`: The ID of the Microsoft Teams tenant.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
 - `"ChannelName"`: The name of the Microsoft Teams channel.
 - `"GuardrailPolicyArns"`: The list of IAM policy ARNs that are applied as channel
-  guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is
-  not set.
+  guardrails. The AWS managed AdministratorAccess policy is applied by default if this is not
+  set.
 - `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE.
-- `"SnsTopicArns"`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot.
-- `"Tags"`: A list of tags to apply to the configuration.
+- `"SnsTopicArns"`: The Amazon Resource Names (ARNs) of the SNS topics that deliver
+  notifications to AWS Chatbot.
+- `"Tags"`: A map of tags assigned to a resource. A tag is a string-to-string map of
+  key-value pairs.
 - `"TeamName"`: The name of the Microsoft Teams Team.
 - `"UserAuthorizationRequired"`: Enables use of a user role requirement in your chat
   configuration.
@@ -164,14 +169,14 @@ end
    create_slack_channel_configuration(configuration_name, iam_role_arn, slack_channel_id, slack_team_id)
    create_slack_channel_configuration(configuration_name, iam_role_arn, slack_channel_id, slack_team_id, params::Dict{String,<:Any})

-Creates Slack Channel Configuration
+Creates an AWS Chatbot configuration for Slack.

# Arguments
- `configuration_name`: The name of the configuration.
-- `iam_role_arn`: The ARN of the IAM role that defines the permissions for AWS Chatbot.
-  This is a user-defined role that AWS Chatbot will assume. This is not the service-linked
-  role. For more information, see IAM Policies for AWS Chatbot.
+- `iam_role_arn`: A user-defined role that AWS Chatbot assumes. This is not the
+  service-linked role. For more information, see IAM policies for AWS Chatbot in the AWS
+  Chatbot Administrator Guide.
-- `slack_channel_id`: The ID of the Slack channel. To get the ID, open Slack, right click
+- `slack_channel_id`: The ID of the Slack channel. To get this ID, open Slack, right click
  on the channel name in the left pane, then choose Copy Link. The channel ID is the
  9-character string at the end of the URL. For example, ABCBBLZZZ.
- `slack_team_id`: The ID of the Slack workspace authorized with AWS Chatbot.
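A minimal usage sketch of the create_slack_channel_configuration call documented above,
given as an aside rather than as part of the patch. It assumes AWS.jl's @service macro;
the role ARN, channel ID, and workspace ID are hypothetical placeholders, and the
positional argument order follows the docstring above.

using AWS
@service Chatbot

# Create a Slack channel configuration. All identifiers are placeholders; the IAM role
# is the user-defined role that AWS Chatbot assumes, not the service-linked role.
Chatbot.create_slack_channel_configuration(
    "my-slack-config",                              # ConfigurationName
    "arn:aws:iam::111122223333:role/chatbot-role",  # IamRoleArn (hypothetical)
    "ABCBBLZZZ",                                    # SlackChannelId (9-character ID from Copy Link)
    "T0123ABCD",                                    # SlackTeamId (hypothetical)
    Dict("LoggingLevel" => "ERROR"),                # optional parameters
)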
@@ -179,12 +184,14 @@ Creates Slack Channel Configuration

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"GuardrailPolicyArns"`: The list of IAM policy ARNs that are applied as channel
-  guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is
-  not set.
+  guardrails. The AWS managed AdministratorAccess policy is applied by default if this is not
+  set.
- `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE.
-- `"SlackChannelName"`: The name of the Slack Channel.
-- `"SnsTopicArns"`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot.
-- `"Tags"`: A list of tags to apply to the configuration.
+- `"SlackChannelName"`: The name of the Slack channel.
+- `"SnsTopicArns"`: The Amazon Resource Names (ARNs) of the SNS topics that deliver
+  notifications to AWS Chatbot.
+- `"Tags"`: A map of tags assigned to a resource. A tag is a string-to-string map of
+  key-value pairs.
- `"UserAuthorizationRequired"`: Enables use of a user role requirement in your chat
  configuration.
"""
@@ -240,10 +247,11 @@ end
    delete_chime_webhook_configuration(chat_configuration_arn)
    delete_chime_webhook_configuration(chat_configuration_arn, params::Dict{String,<:Any})

-Deletes a Chime Webhook Configuration
+Deletes an Amazon Chime webhook configuration for AWS Chatbot.

# Arguments
-- `chat_configuration_arn`: The ARN of the ChimeWebhookConfiguration to delete.
+- `chat_configuration_arn`: The Amazon Resource Name (ARN) of the ChimeWebhookConfiguration
+  to delete.

"""
function delete_chime_webhook_configuration(
@@ -281,10 +289,11 @@ end
    delete_microsoft_teams_channel_configuration(chat_configuration_arn)
    delete_microsoft_teams_channel_configuration(chat_configuration_arn, params::Dict{String,<:Any})

-Deletes MS Teams Channel Configuration
+Deletes a Microsoft Teams channel configuration for AWS Chatbot.

# Arguments
-- `chat_configuration_arn`: The ARN of the MicrosoftTeamsChannelConfiguration to delete.
+- `chat_configuration_arn`: The Amazon Resource Name (ARN) of the
+  MicrosoftTeamsChannelConfiguration associated with the user identity to delete.

"""
function delete_microsoft_teams_channel_configuration(
@@ -327,10 +336,11 @@ that Microsoft Teams team. Note that the Microsoft Teams team must have no channels
configured to remove it.

# Arguments
-- `team_id`: The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID,
-  you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot
-  console. Then you can copy and paste the team ID from the console. For more details, see
-  steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide.
+- `team_id`: The ID of the Microsoft Teams team authorized with AWS Chatbot. To get the
+  team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS
+  Chatbot console. Then you can copy and paste the team ID from the console. For more
+  information, see Step 1: Configure a Microsoft Teams client in the AWS Chatbot
+  Administrator Guide.

"""
function delete_microsoft_teams_configured_team(
@@ -360,12 +370,12 @@ end
    delete_microsoft_teams_user_identity(chat_configuration_arn, user_id)
    delete_microsoft_teams_user_identity(chat_configuration_arn, user_id, params::Dict{String,<:Any})

-Deletes a Teams user identity
+Deletes a user level permission for a Microsoft Teams channel configuration.

# Arguments
- `chat_configuration_arn`: The ARN of the MicrosoftTeamsChannelConfiguration associated
  with the user identity to delete.
-- `user_id`: Id from Microsoft Teams for user.
+- `user_id`: The Microsoft Teams user ID.

"""
function delete_microsoft_teams_user_identity(
@@ -408,10 +418,11 @@ end
    delete_slack_channel_configuration(chat_configuration_arn)
    delete_slack_channel_configuration(chat_configuration_arn, params::Dict{String,<:Any})

-Deletes Slack Channel Configuration
+Deletes a Slack channel configuration for AWS Chatbot.

# Arguments
-- `chat_configuration_arn`: The ARN of the SlackChannelConfiguration to delete.
+- `chat_configuration_arn`: The Amazon Resource Name (ARN) of the SlackChannelConfiguration
+  to delete.

"""
function delete_slack_channel_configuration(
@@ -449,13 +460,13 @@ end
    delete_slack_user_identity(chat_configuration_arn, slack_team_id, slack_user_id)
    delete_slack_user_identity(chat_configuration_arn, slack_team_id, slack_user_id, params::Dict{String,<:Any})

-Deletes a Slack user identity
+Deletes a user level permission for a Slack channel configuration.

# Arguments
- `chat_configuration_arn`: The ARN of the SlackChannelConfiguration associated with the
  user identity to delete.
- `slack_team_id`: The ID of the Slack workspace authorized with AWS Chatbot.
-- `slack_user_id`: The ID of the user in Slack.
+- `slack_user_id`: The ID of the user in Slack.

"""
function delete_slack_user_identity(
@@ -544,11 +555,12 @@ end
    describe_chime_webhook_configurations()
    describe_chime_webhook_configurations(params::Dict{String,<:Any})

-Lists Chime Webhook Configurations optionally filtered by ChatConfigurationArn
+Lists Amazon Chime webhook configurations optionally filtered by ChatConfigurationArn.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"ChatConfigurationArn"`: An optional ARN of a ChimeWebhookConfiguration to describe.
+- `"ChatConfigurationArn"`: An optional Amazon Resource Number (ARN) of a
+  ChimeWebhookConfiguration to describe.
- `"MaxResults"`: The maximum number of results to include in the response. If more results
  exist than the specified MaxResults value, a token is included in the response so that the
  remaining results can be retrieved.
@@ -582,15 +594,16 @@ end
    describe_slack_channel_configurations()
    describe_slack_channel_configurations(params::Dict{String,<:Any})

-Lists Slack Channel Configurations optionally filtered by ChatConfigurationArn
+Lists Slack channel configurations optionally filtered by ChatConfigurationArn.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"ChatConfigurationArn"`: An optional ARN of a SlackChannelConfiguration to describe.
+- `"ChatConfigurationArn"`: An optional Amazon Resource Number (ARN) of a
+  SlackChannelConfiguration to describe.
- `"MaxResults"`: The maximum number of results to include in the response. If more results
  exist than the specified MaxResults value, a token is included in the response so that the
  remaining results can be retrieved.
-- `"NextToken"`: An optional token returned from a prior request. Use this token for
+- `"NextToken"`: An optional token returned from a prior request. Use this token for
  pagination of results from this action. If this parameter is specified, the response
  includes only results beyond the token, up to the value specified by MaxResults.
"""
@@ -624,12 +637,12 @@ Lists all Slack user identities with a mapped role.
# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"ChatConfigurationArn"`: The ARN of the SlackChannelConfiguration associated with the
-  user identities to describe.
+- `"ChatConfigurationArn"`: The Amazon Resource Number (ARN) of the
+  SlackChannelConfiguration associated with the user identities to describe.
- `"MaxResults"`: The maximum number of results to include in the response. If more results
  exist than the specified MaxResults value, a token is included in the response so that the
  remaining results can be retrieved.
-- `"NextToken"`: An optional token returned from a prior request. Use this token for
+- `"NextToken"`: An optional token returned from a prior request. Use this token for
  pagination of results from this action. If this parameter is specified, the response
  includes only results beyond the token, up to the value specified by MaxResults.
"""
@@ -657,14 +670,15 @@ end
    describe_slack_workspaces()
    describe_slack_workspaces(params::Dict{String,<:Any})

-Lists all authorized Slack Workspaces for AWS Account
+Lists all authorized Slack workspaces connected to the AWS Account onboarded with AWS
+Chatbot.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"MaxResults"`: The maximum number of results to include in the response. If more results
  exist than the specified MaxResults value, a token is included in the response so that the
  remaining results can be retrieved.
-- `"NextToken"`: An optional token returned from a prior request. Use this token for
+- `"NextToken"`: An optional token returned from a prior request. Use this token for
  pagination of results from this action. If this parameter is specified, the response
  includes only results beyond the token, up to the value specified by MaxResults.
"""
@@ -692,7 +706,7 @@ end
    get_account_preferences()
    get_account_preferences(params::Dict{String,<:Any})

-Get Chatbot account level preferences
+Returns AWS Chatbot account preferences.

"""
function get_account_preferences(; aws_config::AbstractAWSConfig=global_aws_config())
@@ -719,10 +733,11 @@ end
    get_microsoft_teams_channel_configuration(chat_configuration_arn)
    get_microsoft_teams_channel_configuration(chat_configuration_arn, params::Dict{String,<:Any})

-Get a single MS Teams Channel Configurations
+Returns a Microsoft Teams channel configuration in an AWS account.

# Arguments
-- `chat_configuration_arn`: The ARN of the MicrosoftTeamsChannelConfiguration to retrieve.
+- `chat_configuration_arn`: The Amazon Resource Number (ARN) of the
+  MicrosoftTeamsChannelConfiguration to retrieve.

"""
function get_microsoft_teams_channel_configuration(
@@ -760,7 +775,7 @@ end
    list_microsoft_teams_channel_configurations()
    list_microsoft_teams_channel_configurations(params::Dict{String,<:Any})

-Lists MS Teams Channel Configurations optionally filtered by TeamId
+Lists all AWS Chatbot Microsoft Teams channel configurations in an AWS account.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -770,10 +785,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"NextToken"`: An optional token returned from a prior request. Use this token for
  pagination of results from this action. If this parameter is specified, the response
  includes only results beyond the token, up to the value specified by MaxResults.
-- `"TeamId"`: The ID of the Microsoft Team authorized with AWS Chatbot. To get the team ID,
-  you must perform the initial authorization flow with Microsoft Teams in the AWS Chatbot
-  console. Then you can copy and paste the team ID from the console. For more details, see
-  steps 1-4 in Get started with Microsoft Teams in the AWS Chatbot Administrator Guide.
+- `"TeamId"`: The ID of the Microsoft Teams team authorized with AWS Chatbot. To get the
+  team ID, you must perform the initial authorization flow with Microsoft Teams in the AWS
+  Chatbot console. Then you can copy and paste the team ID from the console. For more
+  information, see Step 1: Configure a Microsoft Teams client in the AWS Chatbot
+  Administrator Guide.
"""
function list_microsoft_teams_channel_configurations(;
    aws_config::AbstractAWSConfig=global_aws_config()
@@ -801,7 +816,7 @@ end
    list_microsoft_teams_configured_teams()
    list_microsoft_teams_configured_teams(params::Dict{String,<:Any})

-Lists all authorized MS teams for AWS Account
+Lists all authorized Microsoft Teams teams for an AWS Account.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -838,12 +853,12 @@ end
    list_microsoft_teams_user_identities()
    list_microsoft_teams_user_identities(params::Dict{String,<:Any})

-Lists all Microsoft Teams user identities with a mapped role.
+Lists all Microsoft Teams user identities with a mapped role.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"ChatConfigurationArn"`: The ARN of the MicrosoftTeamsChannelConfiguration associated
-  with the user identities to list.
+- `"ChatConfigurationArn"`: The Amazon Resource Number (ARN) of the
+  MicrosoftTeamsChannelConfiguration associated with the user identities to list.
- `"MaxResults"`: The maximum number of results to include in the response. If more results
  exist than the specified MaxResults value, a token is included in the response so that the
  remaining results can be retrieved.
@@ -877,10 +892,11 @@ end
    list_tags_for_resource(resource_arn)
    list_tags_for_resource(resource_arn, params::Dict{String,<:Any})

-Retrieves the list of tags applied to a configuration.
+Lists all of the tags associated with the Amazon Resource Name (ARN) that you specify. The
+resource can be a user, server, or role.

# Arguments
-- `resource_arn`: The ARN of the configuration.
+- `resource_arn`: The ARN you specified to list the tags of.

"""
function list_tags_for_resource(
@@ -914,7 +930,8 @@ end
    tag_resource(resource_arn, tags)
    tag_resource(resource_arn, tags, params::Dict{String,<:Any})

-Applies the supplied tags to a configuration.
+Attaches a key-value pair to a resource, as identified by its Amazon Resource Name (ARN).
+Resources are users, servers, roles, and other entities.

# Arguments
- `resource_arn`: The ARN of the configuration.
@@ -955,11 +972,15 @@ end
    untag_resource(resource_arn, tag_keys)
    untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any})

-Removes the supplied tags from a configuration
+Detaches a key-value pair from a resource, as identified by its Amazon Resource Name (ARN).
+Resources are users, servers, roles, and other entities.

# Arguments
-- `resource_arn`: The ARN of the configuration.
-- `tag_keys`: A list of tag keys to remove from the configuration.
+- `resource_arn`: The value of the resource that will have the tag removed. An Amazon
+  Resource Name (ARN) is an identifier for a specific AWS resource, such as a server, user,
+  or role.
+- `tag_keys`: TagKeys are key-value pairs assigned to ARNs that can be used to group and
+  search for resources by type. This metadata can be attached to resources for any purpose.

"""
function untag_resource(
@@ -998,7 +1019,7 @@ end
    update_account_preferences()
    update_account_preferences(params::Dict{String,<:Any})

-Update Chatbot account level preferences
+Updates AWS Chatbot account preferences.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -1009,7 +1030,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
  develop Artificial Intelligence (AI) technologies. Your data is not shared with any third
  parties and is protected using sophisticated controls to prevent unauthorized access and
  misuse. AWS Chatbot does not store or use interactions in chat channels with Amazon Q for
-  training AWS Chatbot’s AI technologies.
+  training AI technologies for AWS Chatbot.
- `"UserAuthorizationRequired"`: Enables use of a user role requirement in your chat
  configuration.
"""
@@ -1037,22 +1058,23 @@ end
    update_chime_webhook_configuration(chat_configuration_arn)
    update_chime_webhook_configuration(chat_configuration_arn, params::Dict{String,<:Any})

-Updates a Chime Webhook Configuration
+Updates an Amazon Chime webhook configuration.

# Arguments
-- `chat_configuration_arn`: The ARN of the ChimeWebhookConfiguration to update.
+- `chat_configuration_arn`: The Amazon Resource Number (ARN) of the
+  ChimeWebhookConfiguration to update.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"IamRoleArn"`: The ARN of the IAM role that defines the permissions for AWS Chatbot.
-  This is a user-defined role that AWS Chatbot will assume. This is not the service-linked
-  role. For more information, see IAM Policies for AWS Chatbot.
+- `"IamRoleArn"`: A user-defined role that AWS Chatbot assumes. This is not the
+  service-linked role. For more information, see IAM policies for AWS Chatbot in the AWS
+  Chatbot Administrator Guide.
- `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE.
- `"SnsTopicArns"`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot.
-- `"WebhookDescription"`: Description of the webhook. Recommend using the convention
-  `RoomName/WebhookName`. See Chime setup tutorial for more details:
-  https://docs.aws.amazon.com/chatbot/latest/adminguide/chime-setup.html.
-- `"WebhookUrl"`: URL for the Chime webhook.
+- `"WebhookDescription"`: A description of the webhook. We recommend using the convention
+  RoomName/WebhookName. For more information, see Tutorial: Get started with Amazon Chime in
+  the AWS Chatbot Administrator Guide.
+- `"WebhookUrl"`: The URL for the Amazon Chime webhook.
"""
function update_chime_webhook_configuration(
    ChatConfigurationArn; aws_config::AbstractAWSConfig=global_aws_config()
@@ -1089,23 +1111,25 @@ end
    update_microsoft_teams_channel_configuration(channel_id, chat_configuration_arn)
    update_microsoft_teams_channel_configuration(channel_id, chat_configuration_arn, params::Dict{String,<:Any})

-Updates MS Teams Channel Configuration
+Updates a Microsoft Teams channel configuration.

# Arguments
- `channel_id`: The ID of the Microsoft Teams channel.
-- `chat_configuration_arn`: The ARN of the MicrosoftTeamsChannelConfiguration to update.
+- `chat_configuration_arn`: The Amazon Resource Number (ARN) of the
+  TeamsChannelConfiguration to update.
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ChannelName"`: The name of the Microsoft Teams channel. - `"GuardrailPolicyArns"`: The list of IAM policy ARNs that are applied as channel - guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is - not set. -- `"IamRoleArn"`: The ARN of the IAM role that defines the permissions for AWS Chatbot. - This is a user-defined role that AWS Chatbot will assume. This is not the service-linked - role. For more information, see IAM Policies for AWS Chatbot. + guardrails. The AWS managed AdministratorAccess policy is applied by default if this is not + set. +- `"IamRoleArn"`: A user-defined role that AWS Chatbot assumes. This is not the + service-linked role. For more information, see IAM policies for AWS Chatbot in the AWS + Chatbot Administrator Guide. - `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE. -- `"SnsTopicArns"`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot. +- `"SnsTopicArns"`: The Amazon Resource Names (ARNs) of the SNS topics that deliver + notifications to AWS Chatbot. - `"UserAuthorizationRequired"`: Enables use of a user role requirement in your chat configuration. """ @@ -1149,25 +1173,27 @@ end update_slack_channel_configuration(chat_configuration_arn, slack_channel_id) update_slack_channel_configuration(chat_configuration_arn, slack_channel_id, params::Dict{String,<:Any}) -Updates Slack Channel Configuration +Updates a Slack channel configuration. # Arguments -- `chat_configuration_arn`: The ARN of the SlackChannelConfiguration to update. -- `slack_channel_id`: The ID of the Slack channel. To get the ID, open Slack, right click +- `chat_configuration_arn`: The Amazon Resource Number (ARN) of the + SlackChannelConfiguration to update. +- `slack_channel_id`: The ID of the Slack channel. To get this ID, open Slack, right click on the channel name in the left pane, then choose Copy Link. The channel ID is the 9-character string at the end of the URL. For example, ABCBBLZZZ. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"GuardrailPolicyArns"`: The list of IAM policy ARNs that are applied as channel - guardrails. The AWS managed 'AdministratorAccess' policy is applied by default if this is - not set. -- `"IamRoleArn"`: The ARN of the IAM role that defines the permissions for AWS Chatbot. - This is a user-defined role that AWS Chatbot will assume. This is not the service-linked - role. For more information, see IAM Policies for AWS Chatbot. + guardrails. The AWS managed AdministratorAccess policy is applied by default if this is not + set. +- `"IamRoleArn"`: A user-defined role that AWS Chatbot assumes. This is not the + service-linked role. For more information, see IAM policies for AWS Chatbot in the AWS + Chatbot Administrator Guide. - `"LoggingLevel"`: Logging levels include ERROR, INFO, or NONE. -- `"SlackChannelName"`: The name of the Slack Channel. -- `"SnsTopicArns"`: The ARNs of the SNS topics that deliver notifications to AWS Chatbot. +- `"SlackChannelName"`: The name of the Slack channel. +- `"SnsTopicArns"`: The Amazon Resource Names (ARNs) of the SNS topics that deliver + notifications to AWS Chatbot. - `"UserAuthorizationRequired"`: Enables use of a user role requirement in your chat configuration. 
""" diff --git a/src/services/chime_sdk_media_pipelines.jl b/src/services/chime_sdk_media_pipelines.jl index 3c9ec4e109..39777491d0 100644 --- a/src/services/chime_sdk_media_pipelines.jl +++ b/src/services/chime_sdk_media_pipelines.jl @@ -320,16 +320,26 @@ end create_media_pipeline_kinesis_video_stream_pool(pool_name, stream_configuration) create_media_pipeline_kinesis_video_stream_pool(pool_name, stream_configuration, params::Dict{String,<:Any}) -Creates an Kinesis video stream pool for the media pipeline. +Creates an Amazon Kinesis Video Stream pool for use with media stream pipelines. If a +meeting uses an opt-in Region as its MediaRegion, the KVS stream must be in that same +Region. For example, if a meeting uses the af-south-1 Region, the KVS stream must also be +in af-south-1. However, if the meeting uses a Region that AWS turns on by default, the KVS +stream can be in any available Region, including an opt-in Region. For example, if the +meeting uses ca-central-1, the KVS stream can be in eu-west-2, us-east-1, af-south-1, or +any other Region that the Amazon Chime SDK supports. To learn which AWS Region a meeting +uses, call the GetMeeting API and use the MediaRegion parameter from the response. For more +information about opt-in Regions, refer to Available Regions in the Amazon Chime SDK +Developer Guide, and Specify which AWS Regions your account can use, in the AWS Account +Management Reference Guide. # Arguments -- `pool_name`: The name of the video stream pool. -- `stream_configuration`: The configuration settings for the video stream. +- `pool_name`: The name of the pool. +- `stream_configuration`: The configuration settings for the stream. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientRequestToken"`: The token assigned to the client making the request. -- `"Tags"`: The tags assigned to the video stream pool. +- `"Tags"`: The tags assigned to the stream pool. """ function create_media_pipeline_kinesis_video_stream_pool( PoolName, StreamConfiguration; aws_config::AbstractAWSConfig=global_aws_config() @@ -531,10 +541,11 @@ end delete_media_pipeline_kinesis_video_stream_pool(identifier) delete_media_pipeline_kinesis_video_stream_pool(identifier, params::Dict{String,<:Any}) -Deletes an Kinesis video stream pool. +Deletes an Amazon Kinesis Video Stream pool. # Arguments -- `identifier`: The ID of the pool being deleted. +- `identifier`: The unique identifier of the requested resource. Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. """ function delete_media_pipeline_kinesis_video_stream_pool( @@ -671,7 +682,8 @@ end Gets an Kinesis video stream pool. # Arguments -- `identifier`: The ID of the video stream pool. +- `identifier`: The unique identifier of the requested resource. Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. """ function get_media_pipeline_kinesis_video_stream_pool( @@ -1314,10 +1326,11 @@ end update_media_pipeline_kinesis_video_stream_pool(identifier) update_media_pipeline_kinesis_video_stream_pool(identifier, params::Dict{String,<:Any}) -Updates an Kinesis video stream pool in a media pipeline. +Updates an Amazon Kinesis Video Stream pool in a media pipeline. # Arguments -- `identifier`: The ID of the video stream pool. +- `identifier`: The unique identifier of the requested resource. Valid values include the + name and ARN of the media pipeline Kinesis Video Stream pool. 
# Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: diff --git a/src/services/cleanrooms.jl b/src/services/cleanrooms.jl index 0d2c9cc8f5..175829ac3d 100644 --- a/src/services/cleanrooms.jl +++ b/src/services/cleanrooms.jl @@ -440,7 +440,8 @@ Creates a new analysis rule for a configured table. Currently, only one analysis be created for a given configured table. # Arguments -- `analysis_rule_policy`: The entire created configured table analysis rule object. +- `analysis_rule_policy`: The analysis rule policy that was created for the configured + table. - `analysis_rule_type`: The type of analysis rule. - `configured_table_identifier`: The identifier for the configured table to create the analysis rule for. Currently accepts the configured table ID. @@ -560,6 +561,187 @@ function create_configured_table_association( ) end +""" + create_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier) + create_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + + Creates a new analysis rule for an associated configured table. + +# Arguments +- `analysis_rule_policy`: The analysis rule policy that was created for the configured + table association. +- `analysis_rule_type`: The type of analysis rule. +- `configured_table_association_identifier`: The unique ID for the configured table + association. Currently accepts the configured table association ID. +- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. + +""" +function create_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule", + Dict{String,Any}( + "analysisRulePolicy" => analysisRulePolicy, + "analysisRuleType" => analysisRuleType, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "analysisRulePolicy" => analysisRulePolicy, + "analysisRuleType" => analysisRuleType, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_id_mapping_table(input_reference_config, membership_identifier, name) + create_id_mapping_table(input_reference_config, membership_identifier, name, params::Dict{String,<:Any}) + +Creates an ID mapping table. + +# Arguments +- `input_reference_config`: The input reference configuration needed to create the ID + mapping table. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table. +- `name`: A name for the ID mapping table. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A description of the ID mapping table. +- `"kmsKeyArn"`: The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. This + value is used to encrypt the mapping table data that is stored by Clean Rooms. +- `"tags"`: An optional label that you can assign to a resource when you create it. Each + tag consists of a key and an optional value, both of which you define. When you use + tagging, you can also use tag-based access control in IAM policies to control access to + this resource. +""" +function create_id_mapping_table( + inputReferenceConfig, + membershipIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables", + Dict{String,Any}("inputReferenceConfig" => inputReferenceConfig, "name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_id_mapping_table( + inputReferenceConfig, + membershipIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputReferenceConfig" => inputReferenceConfig, "name" => name + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_id_namespace_association(input_reference_config, membership_identifier, name) + create_id_namespace_association(input_reference_config, membership_identifier, name, params::Dict{String,<:Any}) + +Creates an ID namespace association. + +# Arguments +- `input_reference_config`: The input reference configuration needed to create the ID + namespace association. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association. +- `name`: The name for the ID namespace association. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the ID namespace association. +- `"idMappingConfig"`: The configuration settings for the ID mapping table. +- `"tags"`: An optional label that you can assign to a resource when you create it. Each + tag consists of a key and an optional value, both of which you define. When you use + tagging, you can also use tag-based access control in IAM policies to control access to + this resource. 
+""" +function create_id_namespace_association( + inputReferenceConfig, + membershipIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idnamespaceassociations", + Dict{String,Any}("inputReferenceConfig" => inputReferenceConfig, "name" => name); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_id_namespace_association( + inputReferenceConfig, + membershipIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idnamespaceassociations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "inputReferenceConfig" => inputReferenceConfig, "name" => name + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_membership(collaboration_identifier, query_log_status) create_membership(collaboration_identifier, query_log_status, params::Dict{String,<:Any}) @@ -924,6 +1106,129 @@ function delete_configured_table_association( ) end +""" + delete_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier) + delete_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Deletes an analysis rule for a configured table association. + +# Arguments +- `analysis_rule_type`: The type of the analysis rule that you want to delete. +- `configured_table_association_identifier`: The identifier for the configured table + association that's related to the analysis rule that you want to delete. +- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. + +""" +function delete_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_id_mapping_table(id_mapping_table_identifier, membership_identifier) + delete_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) + +Deletes an ID mapping table. + +# Arguments +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table that you + want to delete. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to delete. 
+ +""" +function delete_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_id_namespace_association(id_namespace_association_identifier, membership_identifier) + delete_id_namespace_association(id_namespace_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Deletes an ID namespace association. + +# Arguments +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to delete. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to delete. + +""" +function delete_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "DELETE", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_member(account_id, collaboration_identifier) delete_member(account_id, collaboration_identifier, params::Dict{String,<:Any}) @@ -1189,6 +1494,46 @@ function get_collaboration_configured_audience_model_association( ) end +""" + get_collaboration_id_namespace_association(collaboration_identifier, id_namespace_association_identifier) + get_collaboration_id_namespace_association(collaboration_identifier, id_namespace_association_identifier, params::Dict{String,<:Any}) + +Retrieves an ID namespace association from a specific collaboration. + +# Arguments +- `collaboration_identifier`: The unique identifier of the collaboration that contains the + ID namespace association that you want to retrieve. +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to retrieve. 
+ +""" +function get_collaboration_id_namespace_association( + collaborationIdentifier, + idNamespaceAssociationIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_collaboration_id_namespace_association( + collaborationIdentifier, + idNamespaceAssociationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_collaboration_privacy_budget_template(collaboration_identifier, privacy_budget_template_identifier) get_collaboration_privacy_budget_template(collaboration_identifier, privacy_budget_template_identifier, params::Dict{String,<:Any}) @@ -1283,19 +1628,142 @@ function get_configured_table( ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)"; + "/configuredTables/$(configuredTableIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_table( + configuredTableIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/configuredTables/$(configuredTableIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier) + get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier, params::Dict{String,<:Any}) + +Retrieves a configured table analysis rule. + +# Arguments +- `analysis_rule_type`: The analysis rule to be retrieved. Configured table analysis rules + are uniquely identified by their configured table identifier and analysis rule type. +- `configured_table_identifier`: The unique identifier for the configured table to + retrieve. Currently accepts the configured table ID. + +""" +function get_configured_table_analysis_rule( + analysisRuleType, + configuredTableIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_table_analysis_rule( + analysisRuleType, + configuredTableIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_table_association(configured_table_association_identifier, membership_identifier) + get_configured_table_association(configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Retrieves a configured table association. + +# Arguments +- `configured_table_association_identifier`: The unique ID for the configured table + association to retrieve. Currently accepts the configured table ID. +- `membership_identifier`: A unique identifier for the membership that the configured table + association belongs to. Currently accepts the membership ID. 
+ +""" +function get_configured_table_association( + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configured_table_association( + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier) + get_configured_table_association_analysis_rule(analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + + Retrieves the analysis rule for a configured table association. + +# Arguments +- `analysis_rule_type`: The type of analysis rule that you want to retrieve. +- `configured_table_association_identifier`: The identifier for the configured table + association that's related to the analysis rule. +- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. + +""" +function get_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_configured_table( - configuredTableIdentifier, +function get_configured_table_association_analysis_rule( + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1303,39 +1771,39 @@ function get_configured_table( end """ - get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier) - get_configured_table_analysis_rule(analysis_rule_type, configured_table_identifier, params::Dict{String,<:Any}) + get_id_mapping_table(id_mapping_table_identifier, membership_identifier) + get_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) -Retrieves a configured table analysis rule. +Retrieves an ID mapping table. # Arguments -- `analysis_rule_type`: The analysis rule to be retrieved. Configured table analysis rules - are uniquely identified by their configured table identifier and analysis rule type. -- `configured_table_identifier`: The unique identifier for the configured table to - retrieve. Currently accepts the configured table ID. +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table identifier + that you want to retrieve. 
+- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to retrieve. """ -function get_configured_table_analysis_rule( - analysisRuleType, - configuredTableIdentifier; +function get_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)"; + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_configured_table_analysis_rule( - analysisRuleType, - configuredTableIdentifier, +function get_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/configuredTables/$(configuredTableIdentifier)/analysisRule/$(analysisRuleType)", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1343,39 +1811,39 @@ function get_configured_table_analysis_rule( end """ - get_configured_table_association(configured_table_association_identifier, membership_identifier) - get_configured_table_association(configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + get_id_namespace_association(id_namespace_association_identifier, membership_identifier) + get_id_namespace_association(id_namespace_association_identifier, membership_identifier, params::Dict{String,<:Any}) -Retrieves a configured table association. +Retrieves an ID namespace association. # Arguments -- `configured_table_association_identifier`: The unique ID for the configured table - association to retrieve. Currently accepts the configured table ID. -- `membership_identifier`: A unique identifier for the membership that the configured table - association belongs to. Currently accepts the membership ID. +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to retrieve. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to retrieve. 
""" -function get_configured_table_association( - configuredTableAssociationIdentifier, +function get_id_namespace_association( + idNamespaceAssociationIdentifier, membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)"; + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_configured_table_association( - configuredTableAssociationIdentifier, +function get_id_namespace_association( + idNamespaceAssociationIdentifier, membershipIdentifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return cleanrooms( "GET", - "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1691,6 +2159,47 @@ function list_collaboration_configured_audience_model_associations( ) end +""" + list_collaboration_id_namespace_associations(collaboration_identifier) + list_collaboration_id_namespace_associations(collaboration_identifier, params::Dict{String,<:Any}) + +Returns a list of the ID namespace associations in a collaboration. + +# Arguments +- `collaboration_identifier`: The unique identifier of the collaboration that contains the + ID namespace associations that you want to retrieve. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met.> +- `"nextToken"`: The pagination token that's used to fetch the next set of results. +""" +function list_collaboration_id_namespace_associations( + collaborationIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_collaboration_id_namespace_associations( + collaborationIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/collaborations/$(collaborationIdentifier)/idnamespaceassociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_collaboration_privacy_budget_templates(collaboration_identifier) list_collaboration_privacy_budget_templates(collaboration_identifier, params::Dict{String,<:Any}) @@ -1927,6 +2436,88 @@ function list_configured_tables( ) end +""" + list_id_mapping_tables(membership_identifier) + list_id_mapping_tables(membership_identifier, params::Dict{String,<:Any}) + +Returns a list of ID mapping tables. + +# Arguments +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping tables that you want to view. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met. 
+- `"nextToken"`: The pagination token that's used to fetch the next set of results. +""" +function list_id_mapping_tables( + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idmappingtables"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_id_mapping_tables( + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idmappingtables", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_id_namespace_associations(membership_identifier) + list_id_namespace_associations(membership_identifier, params::Dict{String,<:Any}) + +Returns a list of ID namespace associations. + +# Arguments +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to view. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum size of the results that is returned per call. Service + chooses a default if it has not been set. Service may return a nextToken even if the + maximum results has not been met. +- `"nextToken"`: The pagination token that's used to fetch the next set of results. +""" +function list_id_namespace_associations( + membershipIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idnamespaceassociations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_id_namespace_associations( + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "GET", + "/memberships/$(membershipIdentifier)/idnamespaceassociations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_members(collaboration_identifier) list_members(collaboration_identifier, params::Dict{String,<:Any}) @@ -2211,6 +2802,46 @@ function list_tags_for_resource( ) end +""" + populate_id_mapping_table(id_mapping_table_identifier, membership_identifier) + populate_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) + +Defines the information that's necessary to populate an ID mapping table. + +# Arguments +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table that you + want to populate. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to populate. 
+ +""" +function populate_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)/populate"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function populate_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "POST", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)/populate", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ preview_privacy_impact(membership_identifier, parameters) preview_privacy_impact(membership_identifier, parameters, params::Dict{String,<:Any}) @@ -2635,6 +3266,147 @@ function update_configured_table_association( ) end +""" + update_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier) + update_configured_table_association_analysis_rule(analysis_rule_policy, analysis_rule_type, configured_table_association_identifier, membership_identifier, params::Dict{String,<:Any}) + + Updates the analysis rule for a configured table association. + +# Arguments +- `analysis_rule_policy`: The updated analysis rule policy for the configured table + association. +- `analysis_rule_type`: The analysis rule type that you want to update. +- `configured_table_association_identifier`: The identifier for the configured table + association to update. +- `membership_identifier`: A unique identifier for the membership that the configured + table association belongs to. Currently accepts the membership ID. + +""" +function update_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", + Dict{String,Any}("analysisRulePolicy" => analysisRulePolicy); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_configured_table_association_analysis_rule( + analysisRulePolicy, + analysisRuleType, + configuredTableAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/configuredTableAssociations/$(configuredTableAssociationIdentifier)/analysisRule/$(analysisRuleType)", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("analysisRulePolicy" => analysisRulePolicy), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_id_mapping_table(id_mapping_table_identifier, membership_identifier) + update_id_mapping_table(id_mapping_table_identifier, membership_identifier, params::Dict{String,<:Any}) + +Provides the details that are necessary to update an ID mapping table. + +# Arguments +- `id_mapping_table_identifier`: The unique identifier of the ID mapping table that you + want to update. +- `membership_identifier`: The unique identifier of the membership that contains the ID + mapping table that you want to update. 
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A new description for the ID mapping table. +- `"kmsKeyArn"`: The Amazon Resource Name (ARN) of the Amazon Web Services KMS key. +""" +function update_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_id_mapping_table( + idMappingTableIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idmappingtables/$(idMappingTableIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_id_namespace_association(id_namespace_association_identifier, membership_identifier) + update_id_namespace_association(id_namespace_association_identifier, membership_identifier, params::Dict{String,<:Any}) + +Provides the details that are necessary to update an ID namespace association. + +# Arguments +- `id_namespace_association_identifier`: The unique identifier of the ID namespace + association that you want to update. +- `membership_identifier`: The unique identifier of the membership that contains the ID + namespace association that you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: A new description for the ID namespace association. +- `"idMappingConfig"`: The configuration settings for the ID mapping table. +- `"name"`: A new name for the ID namespace association. +""" +function update_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_id_namespace_association( + idNamespaceAssociationIdentifier, + membershipIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return cleanrooms( + "PATCH", + "/memberships/$(membershipIdentifier)/idnamespaceassociations/$(idNamespaceAssociationIdentifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_membership(membership_identifier) update_membership(membership_identifier, params::Dict{String,<:Any}) diff --git a/src/services/cloudfront.jl b/src/services/cloudfront.jl index 21bf5b85ff..1ce2a1b870 100644 --- a/src/services/cloudfront.jl +++ b/src/services/cloudfront.jl @@ -3692,7 +3692,8 @@ end list_tags_for_resource2020_05_31(resource) list_tags_for_resource2020_05_31(resource, params::Dict{String,<:Any}) -List tags for a CloudFront resource. +List tags for a CloudFront resource. For more information, see Tagging a distribution in +the Amazon CloudFront Developer Guide. # Arguments - `resource`: An ARN of a CloudFront resource. @@ -3778,7 +3779,8 @@ end tag_resource2020_05_31(resource, tags) tag_resource2020_05_31(resource, tags, params::Dict{String,<:Any}) -Add tags to a CloudFront resource. +Add tags to a CloudFront resource. 
For more information, see Tagging a distribution in the
+Amazon CloudFront Developer Guide.

 # Arguments
 - `resource`: An ARN of a CloudFront resource.
@@ -3884,7 +3886,8 @@ end
     untag_resource2020_05_31(resource, tag_keys)
     untag_resource2020_05_31(resource, tag_keys, params::Dict{String,<:Any})

-Remove tags from a CloudFront resource.
+Remove tags from a CloudFront resource. For more information, see Tagging a distribution in
+the Amazon CloudFront Developer Guide.

 # Arguments
 - `resource`: An ARN of a CloudFront resource.
diff --git a/src/services/cloudhsm_v2.jl b/src/services/cloudhsm_v2.jl
index 55326e9439..6e20bec62a 100644
--- a/src/services/cloudhsm_v2.jl
+++ b/src/services/cloudhsm_v2.jl
@@ -8,7 +8,8 @@ using AWS.UUIDs
     copy_backup_to_region(backup_id, destination_region)
     copy_backup_to_region(backup_id, destination_region, params::Dict{String,<:Any})

-Copy an AWS CloudHSM cluster backup to a different region.
+Copy a CloudHSM cluster backup to a different region. Cross-account use: No. You cannot
+perform this operation on a CloudHSM backup in a different Amazon Web Services account.

 # Arguments
 - `backup_id`: The ID of the backup that will be copied to the destination region.
@@ -57,7 +58,9 @@ end
     create_cluster(hsm_type, subnet_ids)
     create_cluster(hsm_type, subnet_ids, params::Dict{String,<:Any})

-Creates a new AWS CloudHSM cluster.
+Creates a new CloudHSM cluster. Cross-account use: Yes. To perform this operation with a
+CloudHSM backup in a different AWS account, specify the full backup ARN in the value of the
+SourceBackupId parameter.

 # Arguments
 - `hsm_type`: The type of HSM to use in the cluster. The allowed values are hsm1.medium and
@@ -71,9 +74,10 @@ Creates a new AWS CloudHSM cluster.
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
 - `"BackupRetentionPolicy"`: A policy that defines how the service retains backups.
 - `"Mode"`: The mode to use in the cluster. The allowed values are FIPS and NON_FIPS.
-- `"SourceBackupId"`: The identifier (ID) of the cluster backup to restore. Use this value
-  to restore the cluster from a backup instead of creating a new cluster. To find the backup
-  ID, use DescribeBackups.
+- `"SourceBackupId"`: The identifier (ID) or the Amazon Resource Name (ARN) of the cluster
+  backup to restore. Use this value to restore the cluster from a backup instead of creating
+  a new cluster. To find the backup ID or ARN, use DescribeBackups. If using a backup in
+  another account, the full ARN must be supplied.
 - `"TagList"`: Tags to apply to the CloudHSM cluster during creation.
 """
 function create_cluster(
@@ -110,7 +114,9 @@ end
     create_hsm(availability_zone, cluster_id)
     create_hsm(availability_zone, cluster_id, params::Dict{String,<:Any})

-Creates a new hardware security module (HSM) in the specified AWS CloudHSM cluster.
+Creates a new hardware security module (HSM) in the specified CloudHSM cluster.
+Cross-account use: No. You cannot perform this operation on a CloudHSM cluster in a
+different Amazon Web Services account.

 # Arguments
 - `availability_zone`: The Availability Zone where you are creating the HSM. To find the
@@ -160,8 +166,10 @@ end
     delete_backup(backup_id)
     delete_backup(backup_id, params::Dict{String,<:Any})

-Deletes a specified AWS CloudHSM backup. A backup can be restored up to 7 days after the
-DeleteBackup request is made. For more information on restoring a backup, see RestoreBackup.
+Deletes a specified CloudHSM backup.
A backup can be restored up to 7 days after the
+DeleteBackup request is made. For more information on restoring a backup, see
+RestoreBackup. Cross-account use: No. You cannot perform this operation on a CloudHSM
+backup in a different Amazon Web Services account.

 # Arguments
 - `backup_id`: The ID of the backup to be deleted. To find the ID of a backup, use the
@@ -195,9 +203,10 @@ end
     delete_cluster(cluster_id)
     delete_cluster(cluster_id, params::Dict{String,<:Any})

-Deletes the specified AWS CloudHSM cluster. Before you can delete a cluster, you must
-delete all HSMs in the cluster. To see if the cluster contains any HSMs, use
-DescribeClusters. To delete an HSM, use DeleteHsm.
+Deletes the specified CloudHSM cluster. Before you can delete a cluster, you must delete
+all HSMs in the cluster. To see if the cluster contains any HSMs, use DescribeClusters. To
+delete an HSM, use DeleteHsm. Cross-account use: No. You cannot perform this operation on
+a CloudHSM cluster in a different Amazon Web Services account.

 # Arguments
 - `cluster_id`: The identifier (ID) of the cluster that you are deleting. To find the
@@ -234,6 +243,8 @@ end
 Deletes the specified HSM. To specify an HSM, you can use its identifier (ID), the IP
 address of the HSM's elastic network interface (ENI), or the ID of the HSM's ENI. You need
 to specify only one of these values. To find these values, use DescribeClusters.
+Cross-account use: No. You cannot perform this operation on a CloudHSM HSM in a different
+Amazon Web Services account.

 # Arguments
 - `cluster_id`: The identifier (ID) of the cluster that contains the HSM that you are
@@ -270,15 +281,49 @@ function delete_hsm(
 )
 end

+"""
+    delete_resource_policy()
+    delete_resource_policy(params::Dict{String,<:Any})
+
+ Deletes a CloudHSM resource policy. Deleting a resource policy will result in the
+resource being unshared and removed from any RAM resource shares. Deleting the resource
+policy attached to a backup will not impact any clusters created from that backup.
+Cross-account use: No. You cannot perform this operation on a CloudHSM resource in a
+different Amazon Web Services account.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource from which the policy will be
+  removed.
+"""
+function delete_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config())
+    return cloudhsm_v2(
+        "DeleteResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function delete_resource_policy(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return cloudhsm_v2(
+        "DeleteResourcePolicy",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     describe_backups()
     describe_backups(params::Dict{String,<:Any})

-Gets information about backups of AWS CloudHSM clusters. This is a paginated operation,
-which means that each response might contain only a subset of all the backups. When the
-response contains only a subset of backups, it includes a NextToken value. Use this value
-in a subsequent DescribeBackups request to get more backups. When you receive a response
-with no NextToken (or an empty or null value), that means there are no more backups to get.
+Gets information about backups of CloudHSM clusters. Lists either the backups you own or
+the backups shared with you when the Shared parameter is true.
This is a paginated
+operation, which means that each response might contain only a subset of all the backups.
+When the response contains only a subset of backups, it includes a NextToken value. Use
+this value in a subsequent DescribeBackups request to get more backups. When you receive a
+response with no NextToken (or an empty or null value), that means there are no more
+backups to get. Cross-account use: Yes. Customers can describe backups in other Amazon Web
+Services accounts that are shared with them.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -296,6 +341,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   more backups than the number you specify, the response contains a NextToken value.
 - `"NextToken"`: The NextToken value that you received in the previous response. Use this
   value to get more backups.
+- `"Shared"`: Describe backups that are shared with you. By default when using this
+  option, the command returns backups that have been shared using a standard Resource Access
+  Manager resource share. In order for a backup that was shared using the PutResourcePolicy
+  command to be returned, the share must be promoted to a standard resource share using the
+  RAM PromoteResourceShareCreatedFromPolicy API operation. For more information about sharing
+  backups, see Working with shared backups in the CloudHSM User Guide.
 - `"SortAscending"`: Designates whether or not to sort the return backups by ascending
   chronological order of generation.
 """
@@ -316,11 +367,13 @@ end
     describe_clusters()
     describe_clusters(params::Dict{String,<:Any})

-Gets information about AWS CloudHSM clusters. This is a paginated operation, which means
-that each response might contain only a subset of all the clusters. When the response
-contains only a subset of clusters, it includes a NextToken value. Use this value in a
-subsequent DescribeClusters request to get more clusters. When you receive a response with
-no NextToken (or an empty or null value), that means there are no more clusters to get.
+Gets information about CloudHSM clusters. This is a paginated operation, which means that
+each response might contain only a subset of all the clusters. When the response contains
+only a subset of clusters, it includes a NextToken value. Use this value in a subsequent
+DescribeClusters request to get more clusters. When you receive a response with no
+NextToken (or an empty or null value), that means there are no more clusters to get.
+Cross-account use: No. You cannot perform this operation on CloudHSM clusters in a
+different Amazon Web Services account.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -347,14 +400,40 @@ function describe_clusters(
 )
 end

+"""
+    get_resource_policy()
+    get_resource_policy(params::Dict{String,<:Any})
+
+ Retrieves the resource policy document attached to a given resource. Cross-account use:
+No. You cannot perform this operation on a CloudHSM resource in a different Amazon Web
+Services account.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource to which a policy is attached.
+""" +function get_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config()) + return cloudhsm_v2( + "GetResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_resource_policy( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudhsm_v2( + "GetResourcePolicy", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ initialize_cluster(cluster_id, signed_cert, trust_anchor) initialize_cluster(cluster_id, signed_cert, trust_anchor, params::Dict{String,<:Any}) -Claims an AWS CloudHSM cluster by submitting the cluster certificate issued by your issuing +Claims an CloudHSM cluster by submitting the cluster certificate issued by your issuing certificate authority (CA) and the CA's root certificate. Before you can claim a cluster, you must sign the cluster's certificate signing request (CSR) with your issuing CA. To get -the cluster's CSR, use DescribeClusters. +the cluster's CSR, use DescribeClusters. Cross-account use: No. You cannot perform this +operation on an CloudHSM cluster in a different Amazon Web Services account. # Arguments - `cluster_id`: The identifier (ID) of the cluster that you are claiming. To find the @@ -412,11 +491,13 @@ end list_tags(resource_id) list_tags(resource_id, params::Dict{String,<:Any}) -Gets a list of tags for the specified AWS CloudHSM cluster. This is a paginated operation, +Gets a list of tags for the specified CloudHSM cluster. This is a paginated operation, which means that each response might contain only a subset of all the tags. When the response contains only a subset of tags, it includes a NextToken value. Use this value in a subsequent ListTags request to get more tags. When you receive a response with no NextToken -(or an empty or null value), that means there are no more tags to get. +(or an empty or null value), that means there are no more tags to get. Cross-account use: +No. You cannot perform this operation on an CloudHSM resource in a different Amazon Web +Services account. # Arguments - `resource_id`: The cluster identifier (ID) for the cluster whose tags you are getting. To @@ -456,7 +537,8 @@ end modify_backup_attributes(backup_id, never_expires) modify_backup_attributes(backup_id, never_expires, params::Dict{String,<:Any}) -Modifies attributes for AWS CloudHSM backup. +Modifies attributes for CloudHSM backup. Cross-account use: No. You cannot perform this +operation on an CloudHSM backup in a different Amazon Web Services account. # Arguments - `backup_id`: The identifier (ID) of the backup to modify. To find the ID of a backup, use @@ -500,7 +582,8 @@ end modify_cluster(backup_retention_policy, cluster_id) modify_cluster(backup_retention_policy, cluster_id, params::Dict{String,<:Any}) -Modifies AWS CloudHSM cluster. +Modifies CloudHSM cluster. Cross-account use: No. You cannot perform this operation on an +CloudHSM cluster in a different Amazon Web Services account. # Arguments - `backup_retention_policy`: A policy that defines how the service retains backups. @@ -543,12 +626,52 @@ function modify_cluster( ) end +""" + put_resource_policy() + put_resource_policy(params::Dict{String,<:Any}) + +Creates or updates an CloudHSM resource policy. A resource policy helps you to define the +IAM entity (for example, an Amazon Web Services account) that can manage your CloudHSM +resources. 
The following resources support CloudHSM resource policies: Backup - The
+resource policy allows you to describe the backup and restore a cluster from the backup in
+another Amazon Web Services account. In order to share a backup, it must be in a 'READY'
+state and you must own it. While you can share a backup using the CloudHSM
+PutResourcePolicy operation, we recommend using Resource Access Manager (RAM) instead.
+Using RAM provides multiple benefits as it creates the policy for you, allows multiple
+resources to be shared at one time, and increases the discoverability of shared resources.
+If you use PutResourcePolicy and want consumers to be able to describe the backups you
+share with them, you must promote the backup to a standard RAM Resource Share using the RAM
+PromoteResourceShareCreatedFromPolicy API operation. For more information, see Working
+with shared backups in the CloudHSM User Guide. Cross-account use: No. You cannot perform
+this operation on a CloudHSM resource in a different Amazon Web Services account.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"Policy"`: The policy you want to associate with a resource. For an example policy, see
+  Working with shared backups in the CloudHSM User Guide.
+- `"ResourceArn"`: Amazon Resource Name (ARN) of the resource to which you want to attach a
+  policy.
+"""
+function put_resource_policy(; aws_config::AbstractAWSConfig=global_aws_config())
+    return cloudhsm_v2(
+        "PutResourcePolicy"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function put_resource_policy(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return cloudhsm_v2(
+        "PutResourcePolicy", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+
 """
     restore_backup(backup_id)
     restore_backup(backup_id, params::Dict{String,<:Any})

-Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state. For mor
-information on deleting a backup, see DeleteBackup.
+Restores a specified CloudHSM backup that is in the PENDING_DELETION state. For more
+information on deleting a backup, see DeleteBackup. Cross-account use: No. You cannot
+perform this operation on a CloudHSM backup in a different Amazon Web Services account.

 # Arguments
 - `backup_id`: The ID of the backup to be restored. To find the ID of a backup, use the
@@ -582,7 +705,9 @@ end
     tag_resource(resource_id, tag_list)
     tag_resource(resource_id, tag_list, params::Dict{String,<:Any})

-Adds or overwrites one or more tags for the specified AWS CloudHSM cluster.
+Adds or overwrites one or more tags for the specified CloudHSM cluster. Cross-account use:
+No. You cannot perform this operation on a CloudHSM resource in a different Amazon Web
+Services account.

 # Arguments
 - `resource_id`: The cluster identifier (ID) for the cluster that you are tagging. To find
@@ -624,7 +749,9 @@ end
     untag_resource(resource_id, tag_key_list)
     untag_resource(resource_id, tag_key_list, params::Dict{String,<:Any})

-Removes the specified tag or tags from the specified AWS CloudHSM cluster.
+Removes the specified tag or tags from the specified CloudHSM cluster. Cross-account use:
+No. You cannot perform this operation on a CloudHSM resource in a different Amazon Web
+Services account.

 # Arguments
 - `resource_id`: The cluster identifier (ID) for the cluster whose tags you are removing.
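
The CloudHSM v2 additions above (PutResourcePolicy, GetResourcePolicy, DeleteResourcePolicy,
and the new Shared flag on DescribeBackups) together form a cross-account backup-sharing
workflow. A minimal usage sketch, assuming AWS.jl's `@service` loader; the backup ARN,
account ID, and policy document are hypothetical placeholders, and the IAM action names are
assumptions rather than values taken from this patch:

using AWS
@service Cloudhsm_V2

# Hypothetical placeholders; substitute real values.
backup_arn = "arn:aws:cloudhsm:us-east-1:111122223333:backup/backup-placeholder"
policy = """
{
  "Version": "2012-10-17",
  "Statement": [{
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::444455556666:root"},
    "Action": ["cloudhsm:DescribeBackups", "cloudhsm:RestoreBackup"],
    "Resource": "$backup_arn"
  }]
}
"""

# Owner account: attach the policy to share the backup, then read it back.
Cloudhsm_V2.put_resource_policy(Dict("ResourceArn" => backup_arn, "Policy" => policy))
Cloudhsm_V2.get_resource_policy(Dict("ResourceArn" => backup_arn))

# Consumer account: list backups shared with you via the new Shared parameter.
Cloudhsm_V2.describe_backups(Dict("Shared" => true))

# Owner account: stop sharing by deleting the resource policy.
Cloudhsm_V2.delete_resource_policy(Dict("ResourceArn" => backup_arn))

Note that the docstrings above recommend RAM-based resource shares over PutResourcePolicy,
so this direct-policy flow is mainly suited to simple one-backup, two-account setups.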
diff --git a/src/services/cloudwatch_logs.jl b/src/services/cloudwatch_logs.jl index 7a64d4447d..dfc5291c49 100644 --- a/src/services/cloudwatch_logs.jl +++ b/src/services/cloudwatch_logs.jl @@ -139,6 +139,12 @@ can't update an existing delivery. You can only create and delete deliveries. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"fieldDelimiter"`: The field delimiter to use between record fields when the final + output format of a delivery is in Plain, W3C, or Raw format. +- `"recordFields"`: The list of record fields to be delivered to the destination, in order. + If the delivery’s log source has mandatory fields, they must be included in this list. +- `"s3DeliveryConfiguration"`: This structure contains parameters that are valid only when + the delivery’s delivery destination is an S3 bucket. - `"tags"`: An optional list of key-value pairs to associate with the resource. For more information about tagging, see Tagging Amazon Web Services resources """ @@ -1071,6 +1077,49 @@ function describe_account_policies( ) end +""" + describe_configuration_templates() + describe_configuration_templates(params::Dict{String,<:Any}) + +Use this operation to return the valid and default values that are used when creating +delivery sources, delivery destinations, and deliveries. For more information about +deliveries, see CreateDelivery. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"deliveryDestinationTypes"`: Use this parameter to filter the response to include only + the configuration templates that apply to the delivery destination types that you specify + here. +- `"limit"`: Use this parameter to limit the number of configuration templates that are + returned in the response. +- `"logTypes"`: Use this parameter to filter the response to include only the configuration + templates that apply to the log types that you specify here. +- `"nextToken"`: +- `"resourceTypes"`: Use this parameter to filter the response to include only the + configuration templates that apply to the resource types that you specify here. +- `"service"`: Use this parameter to filter the response to include only the configuration + templates that apply to the Amazon Web Services service that you specify here. +""" +function describe_configuration_templates(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "DescribeConfigurationTemplates"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_configuration_templates( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "DescribeConfigurationTemplates", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_deliveries() describe_deliveries(params::Dict{String,<:Any}) @@ -1593,10 +1642,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys logGroupIdentifier or logGroupName, but not both. - `"logStreamNamePrefix"`: Filters the results to include only events from log streams that have names starting with this prefix. If you specify a value for both logStreamNamePrefix - and logStreamNames, but the value for logStreamNamePrefix does not match any log stream - names specified in logStreamNames, the action returns an InvalidParameterException error. + and logStreamNames, the action returns an InvalidParameterException error. 
- `"logStreamNames"`: Filters the results to only logs from the log streams in this list. - If you specify a value for both logStreamNamePrefix and logStreamNames, the action returns + If you specify a value for both logStreamNames and logStreamNamePrefix, the action returns an InvalidParameterException error. - `"nextToken"`: The token for the next set of events to return. (You received this token from a previous call.) @@ -2197,10 +2245,10 @@ account as the subscription policy, for same-account delivery. A Lambda functi same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. Each account can have -one account-level subscription filter policy. If you are updating an existing filter, you -must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription -filter operation for any destination except a Lambda function, you must also have the -iam:PassRole permission. +one account-level subscription filter policy per Region. If you are updating an existing +filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy +subscription filter operation for any destination except a Lambda function, you must also +have the iam:PassRole permission. # Arguments - `policy_document`: Specify the policy, in JSON. Data protection policy A data @@ -2233,7 +2281,7 @@ iam:PassRole permission. grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery. FilterPattern A filter pattern for subscribing to a filtered - stream of log events. DistributionThe method used to distribute log data to the + stream of log events. Distribution The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream. @@ -2537,11 +2585,11 @@ delivery source, all the current delivery source parameters are overwritten with parameter values that you specify. # Arguments -- `log_type`: Defines the type of log that the source is sending. For Amazon - CodeWhisperer, the valid value is EVENT_LOGS. For IAM Identity Centerr, the valid value - is ERROR_LOGS. For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, - AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS. - +- `log_type`: Defines the type of log that the source is sending. For Amazon Bedrock, the + valid value is APPLICATION_LOGS. For Amazon CodeWhisperer, the valid value is EVENT_LOGS. + For IAM Identity Center, the valid value is ERROR_LOGS. For Amazon WorkMail, the valid + values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, + and WORKMAIL_MAILBOX_ACCESS_LOGS. - `name`: A name for this delivery source. This name must be unique for all delivery sources in your account. - `resource_arn`: The ARN of the Amazon Web Services resource that is generating and @@ -2743,6 +2791,7 @@ access key ID or secret key. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"entity"`: Reserved for internal use. 
- `"sequenceToken"`: The sequence token obtained from the response of the previous
  PutLogEvents call. The sequenceToken parameter is now ignored in PutLogEvents actions.
  PutLogEvents actions are now accepted and never return InvalidSequenceTokenException or
@@ -2797,16 +2846,20 @@ end

 Creates or updates a metric filter and associates it with the specified log group. With
 metric filters, you can configure rules to extract metric data from log events ingested
 through PutLogEvents. The maximum number of metric filters that can be associated with a
-log group is 100. When you create a metric filter, you can also optionally assign a unit
-and dimensions to the metric that is created. Metrics extracted from log events are
-charged as custom metrics. To prevent unexpected high charges, do not specify
-high-cardinality fields such as IPAddress or requestID as dimensions. Each different value
-found for a dimension is treated as a separate metric and accrues charges as a separate
-custom metric. CloudWatch Logs might disable a metric filter if it generates 1,000
-different name/value pairs for your specified dimensions within one hour. You can also set
-up a billing alarm to alert you if your charges are higher than expected. For more
-information, see Creating a Billing Alarm to Monitor Your Estimated Amazon Web Services
-Charges.
+log group is 100. Using regular expressions to create metric filters is supported. For
+these filters, there is a quota of two regular expression patterns within a
+single filter pattern. There is also a quota of five regular expression patterns per log
+group. For more information about using regular expressions in metric filters, see Filter
+pattern syntax for metric filters, subscription filters, filter log events, and Live Tail.
+When you create a metric filter, you can also optionally assign a unit and dimensions to
+the metric that is created. Metrics extracted from log events are charged as custom
+metrics. To prevent unexpected high charges, do not specify high-cardinality fields such as
+IPAddress or requestID as dimensions. Each different value found for a dimension is treated
+as a separate metric and accrues charges as a separate custom metric. CloudWatch Logs
+might disable a metric filter if it generates 1,000 different name/value pairs for your
+specified dimensions within one hour. You can also set up a billing alarm to alert you if
+your charges are higher than expected. For more information, see Creating a Billing Alarm
+to Monitor Your Estimated Amazon Web Services Charges.

 # Arguments
 - `filter_name`: A name for the metric filter.
@@ -3046,9 +3099,14 @@ logical destinations. An Amazon Kinesis Data Firehose delivery stream that bel
 same account as the subscription filter, for same-account delivery. An Lambda function
 that belongs to the same account as the subscription filter, for same-account delivery.
 Each log group can have up to two subscription filters associated with it. If you are
-updating an existing filter, you must specify the correct name in filterName. To perform a
-PutSubscriptionFilter operation for any destination except a Lambda function, you must also
-have the iam:PassRole permission.
+updating an existing filter, you must specify the correct name in filterName. Using
+regular expressions to create subscription filters is supported. For these filters, there
+is a quota of two regular expression patterns within a single filter pattern.
+There is also a quota of five regular expression patterns per log group.
For more +information about using regular expressions in subscription filters, see Filter pattern +syntax for metric filters, subscription filters, filter log events, and Live Tail. To +perform a PutSubscriptionFilter operation for any destination except a Lambda function, you +must also have the iam:PassRole permission. # Arguments - `destination_arn`: The ARN of the destination to deliver matching log events to. @@ -3609,6 +3667,47 @@ function update_anomaly( ) end +""" + update_delivery_configuration(id) + update_delivery_configuration(id, params::Dict{String,<:Any}) + +Use this operation to update the configuration of a delivery to change either the S3 path +pattern or the format of the delivered logs. You can't use this operation to change the +source or destination of the delivery. + +# Arguments +- `id`: The ID of the delivery to be updated by this request. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"fieldDelimiter"`: The field delimiter to use between record fields when the final + output format of a delivery is in Plain, W3C, or Raw format. +- `"recordFields"`: The list of record fields to be delivered to the destination, in order. + If the delivery’s log source has mandatory fields, they must be included in this list. +- `"s3DeliveryConfiguration"`: This structure contains parameters that are valid only when + the delivery’s delivery destination is an S3 bucket. +""" +function update_delivery_configuration( + id; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "UpdateDeliveryConfiguration", + Dict{String,Any}("id" => id); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_delivery_configuration( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return cloudwatch_logs( + "UpdateDeliveryConfiguration", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_log_anomaly_detector(anomaly_detector_arn, enabled) update_log_anomaly_detector(anomaly_detector_arn, enabled, params::Dict{String,<:Any}) diff --git a/src/services/codebuild.jl b/src/services/codebuild.jl index e4a06a4f61..9fa7c904c7 100644 --- a/src/services/codebuild.jl +++ b/src/services/codebuild.jl @@ -257,14 +257,17 @@ Creates a compute fleet. Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific - (Tokyo), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is - available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia - Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The - environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. - Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific - (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and - Asia Pacific (Mumbai). For more information, see Build environment compute types in the - CodeBuild user guide. + (Tokyo), and Asia Pacific (Sydney). The environment type MAC_ARM is available for Medium + fleets only in regions US East (N. 
Virginia), US East (Ohio), US West (Oregon), Asia
+  Pacific (Sydney), and EU (Frankfurt). The environment type MAC_ARM is available for Large
+  fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), and Asia
+  Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in
+  regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney),
+  Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type
+  WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. Virginia), US East
+  (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific
+  (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).
+  For more information, see Build environment compute types in the CodeBuild user guide.
 - `name`: The name of the compute fleet.

 # Optional Parameters
@@ -272,6 +275,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 - `"fleetServiceRole"`: The service role associated with the compute fleet. For more
   information, see Allow a user to add a permission policy for a fleet service role in the
   CodeBuild User Guide.
+- `"imageId"`: The Amazon Machine Image (AMI) of the compute fleet.
 - `"overflowBehavior"`: The compute fleet overflow behavior. For overflow behavior QUEUE,
   your overflow builds need to wait on the existing fleet instance to become available. For
   overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you
@@ -997,17 +1001,18 @@ end
     import_source_credentials(auth_type, server_type, token, params::Dict{String,<:Any})

 Imports the source repository credentials for an CodeBuild project that has its source
-code stored in a GitHub, GitHub Enterprise, or Bitbucket repository.
+code stored in a GitHub, GitHub Enterprise, GitLab, GitLab Self Managed, or Bitbucket
+repository.

 # Arguments
 - `auth_type`: The type of authentication used to connect to a GitHub, GitHub Enterprise,
   GitLab, GitLab Self Managed, or Bitbucket repository. An OAUTH connection is not supported
-  by the API and must be created using the CodeBuild console. Note that CODECONNECTIONS is
-  only valid for GitLab and GitLab Self Managed.
+  by the API and must be created using the CodeBuild console.
 - `server_type`: The source provider used for this project.
 - `token`: For GitHub or GitHub Enterprise, this is the personal access token. For
   Bitbucket, this is either the access token or the app password. For the authType
-  CODECONNECTIONS, this is the connectionArn.
+  CODECONNECTIONS, this is the connectionArn. For the authType SECRETS_MANAGER, this is the
+  secretArn.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -2017,17 +2022,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   Pacific (Singapore), Asia Pacific (Sydney), South America (São Paulo), and Asia Pacific
   (Mumbai). The environment type LINUX_GPU_CONTAINER is available only in regions US East
   (N. Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific
-  (Tokyo), and Asia Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is
-  available only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia
-  Pacific (Sydney), Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The
-  environment type WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N.
- Virginia), US East (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific
-  (Sydney), Asia Pacific (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and
-  Asia Pacific (Mumbai). For more information, see Build environment compute types in the
-  CodeBuild user guide.
+  (Tokyo), and Asia Pacific (Sydney). The environment type MAC_ARM is available for Medium
+  fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia
+  Pacific (Sydney), and EU (Frankfurt). The environment type MAC_ARM is available for Large
+  fleets only in regions US East (N. Virginia), US East (Ohio), US West (Oregon), and Asia
+  Pacific (Sydney). The environment type WINDOWS_SERVER_2019_CONTAINER is available only in
+  regions US East (N. Virginia), US East (Ohio), US West (Oregon), Asia Pacific (Sydney),
+  Asia Pacific (Tokyo), Asia Pacific (Mumbai) and EU (Ireland). The environment type
+  WINDOWS_SERVER_2022_CONTAINER is available only in regions US East (N. Virginia), US East
+  (Ohio), US West (Oregon), EU (Ireland), EU (Frankfurt), Asia Pacific (Sydney), Asia Pacific
+  (Singapore), Asia Pacific (Tokyo), South America (São Paulo) and Asia Pacific (Mumbai).
+  For more information, see Build environment compute types in the CodeBuild user guide.
 - `"fleetServiceRole"`: The service role associated with the compute fleet. For more
   information, see Allow a user to add a permission policy for a fleet service role in the
   CodeBuild User Guide.
+- `"imageId"`: The Amazon Machine Image (AMI) of the compute fleet.
 - `"overflowBehavior"`: The compute fleet overflow behavior. For overflow behavior QUEUE,
   your overflow builds need to wait on the existing fleet instance to become available. For
   overflow behavior ON_DEMAND, your overflow builds run on CodeBuild on-demand. If you
diff --git a/src/services/codepipeline.jl b/src/services/codepipeline.jl
index 50afd54dac..c58d674542 100644
--- a/src/services/codepipeline.jl
+++ b/src/services/codepipeline.jl
@@ -872,6 +872,76 @@ function list_pipelines(
 )
 end

+"""
+    list_rule_executions(pipeline_name)
+    list_rule_executions(pipeline_name, params::Dict{String,<:Any})
+
+Lists the rule executions that have occurred in a pipeline configured for conditions with
+rules.
+
+# Arguments
+- `pipeline_name`: The name of the pipeline for which you want to get execution summary
+  information.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"filter"`: Input information used to filter rule execution history.
+- `"maxResults"`: The maximum number of results to return in a single call. To retrieve the
+  remaining results, make another call with the returned nextToken value. Pipeline history is
+  limited to the most recent 12 months, based on pipeline execution start times. Default
+  value is 100.
+- `"nextToken"`: The token that was returned from the previous ListRuleExecutions call,
+  which can be used to return the next set of rule executions in the list.
+""" +function list_rule_executions( + pipelineName; aws_config::AbstractAWSConfig=global_aws_config() +) + return codepipeline( + "ListRuleExecutions", + Dict{String,Any}("pipelineName" => pipelineName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_rule_executions( + pipelineName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codepipeline( + "ListRuleExecutions", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("pipelineName" => pipelineName), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_rule_types() + list_rule_types(params::Dict{String,<:Any}) + +Lists the rules for the condition. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"regionFilter"`: The rule Region to filter on. +- `"ruleOwnerFilter"`: The rule owner to filter on. +""" +function list_rule_types(; aws_config::AbstractAWSConfig=global_aws_config()) + return codepipeline( + "ListRuleTypes"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_rule_types( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return codepipeline( + "ListRuleTypes", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + """ list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) @@ -919,7 +989,7 @@ end Gets a listing of all the webhooks in this Amazon Web Services Region for this account. The output lists all webhooks and includes the webhook URL and ARN and the configuration for -each webhook. +each webhook. If a secret token was provided, it will be redacted in the response. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -941,6 +1011,66 @@ function list_webhooks( ) end +""" + override_stage_condition(condition_type, pipeline_execution_id, pipeline_name, stage_name) + override_stage_condition(condition_type, pipeline_execution_id, pipeline_name, stage_name, params::Dict{String,<:Any}) + +Used to override a stage condition. + +# Arguments +- `condition_type`: The type of condition to override for the stage, such as entry + conditions, failure conditions, or success conditions. +- `pipeline_execution_id`: The ID of the pipeline execution for the override. +- `pipeline_name`: The name of the pipeline with the stage that will override the condition. +- `stage_name`: The name of the stage for the override. 
+ +""" +function override_stage_condition( + conditionType, + pipelineExecutionId, + pipelineName, + stageName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codepipeline( + "OverrideStageCondition", + Dict{String,Any}( + "conditionType" => conditionType, + "pipelineExecutionId" => pipelineExecutionId, + "pipelineName" => pipelineName, + "stageName" => stageName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function override_stage_condition( + conditionType, + pipelineExecutionId, + pipelineName, + stageName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return codepipeline( + "OverrideStageCondition", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "conditionType" => conditionType, + "pipelineExecutionId" => pipelineExecutionId, + "pipelineName" => pipelineName, + "stageName" => stageName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ poll_for_jobs(action_type_id) poll_for_jobs(action_type_id, params::Dict{String,<:Any}) @@ -1360,7 +1490,14 @@ change. When CodePipeline receives a POST request on this URL, the pipeline defi webhook is started as long as the POST request satisfied the authentication and filtering requirements supplied when defining the webhook. RegisterWebhookWithThirdParty and DeregisterWebhookWithThirdParty APIs can be used to automatically configure supported third -parties to call the generated webhook URL. +parties to call the generated webhook URL. When creating CodePipeline webhooks, do not use +your own credentials or reuse the same secret token across multiple webhooks. For optimal +security, generate a unique secret token for each webhook you create. The secret token is +an arbitrary string that you provide, which GitHub uses to compute and sign the webhook +payloads sent to CodePipeline, for protecting the integrity and authenticity of the webhook +payloads. Using your own credentials or reusing the same token across multiple webhooks can +lead to security vulnerabilities. If a secret token was provided, it will be redacted in +the response. # Arguments - `webhook`: The detail provided in an input file to create the webhook, such as the diff --git a/src/services/codestar.jl b/src/services/codestar.jl deleted file mode 100644 index 6e253b2135..0000000000 --- a/src/services/codestar.jl +++ /dev/null @@ -1,701 +0,0 @@ -# This file is auto-generated by AWSMetadata.jl -using AWS -using AWS.AWSServices: codestar -using AWS.Compat -using AWS.UUIDs - -""" - associate_team_member(project_id, project_role, user_arn) - associate_team_member(project_id, project_role, user_arn, params::Dict{String,<:Any}) - -Adds an IAM user to the team for an AWS CodeStar project. - -# Arguments -- `project_id`: The ID of the project to which you will add the IAM user. -- `project_role`: The AWS CodeStar project role that will apply to this user. This role - determines what actions a user can take in an AWS CodeStar project. -- `user_arn`: The Amazon Resource Name (ARN) for the IAM user you want to add to the AWS - CodeStar project. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: A user- or system-generated token that identifies the entity that - requested the team member association to the project. This token can be used to repeat the - request. 
-- `"remoteAccessAllowed"`: Whether the team member is allowed to use an SSH public/private - key pair to remotely access project resources, for example Amazon EC2 instances. -""" -function associate_team_member( - projectId, projectRole, userArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "AssociateTeamMember", - Dict{String,Any}( - "projectId" => projectId, "projectRole" => projectRole, "userArn" => userArn - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function associate_team_member( - projectId, - projectRole, - userArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return codestar( - "AssociateTeamMember", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "projectId" => projectId, - "projectRole" => projectRole, - "userArn" => userArn, - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_project(id, name) - create_project(id, name, params::Dict{String,<:Any}) - -Creates a project, including project resources. This action creates a project based on a -submitted project request. A set of source code files and a toolchain template file can be -included with the project request. If these are not provided, an empty project is created. - -# Arguments -- `id`: The ID of the project to be created in AWS CodeStar. -- `name`: The display name for the project to be created in AWS CodeStar. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: A user- or system-generated token that identifies the entity that - requested project creation. This token can be used to repeat the request. -- `"description"`: The description of the project, if any. -- `"sourceCode"`: A list of the Code objects submitted with the project request. If this - parameter is specified, the request must also include the toolchain parameter. -- `"tags"`: The tags created for the project. -- `"toolchain"`: The name of the toolchain template file submitted with the project - request. If this parameter is specified, the request must also include the sourceCode - parameter. -""" -function create_project(id, name; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "CreateProject", - Dict{String,Any}("id" => id, "name" => name); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_project( - id, - name, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return codestar( - "CreateProject", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("id" => id, "name" => name), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - create_user_profile(display_name, email_address, user_arn) - create_user_profile(display_name, email_address, user_arn, params::Dict{String,<:Any}) - -Creates a profile for a user that includes user preferences, such as the display name and -email address assocciated with the user, in AWS CodeStar. The user profile is not -project-specific. Information in the user profile is displayed wherever the user's -information appears to other users in AWS CodeStar. - -# Arguments -- `display_name`: The name that will be displayed as the friendly name for the user in AWS - CodeStar. -- `email_address`: The email address that will be displayed as part of the user's profile - in AWS CodeStar. 
-- `user_arn`: The Amazon Resource Name (ARN) of the user in IAM. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"sshPublicKey"`: The SSH public key associated with the user in AWS CodeStar. If a - project owner allows the user remote access to project resources, this public key will be - used along with the user's private key for SSH access. -""" -function create_user_profile( - displayName, emailAddress, userArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "CreateUserProfile", - Dict{String,Any}( - "displayName" => displayName, - "emailAddress" => emailAddress, - "userArn" => userArn, - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function create_user_profile( - displayName, - emailAddress, - userArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return codestar( - "CreateUserProfile", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "displayName" => displayName, - "emailAddress" => emailAddress, - "userArn" => userArn, - ), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_project(id) - delete_project(id, params::Dict{String,<:Any}) - -Deletes a project, including project resources. Does not delete users associated with the -project, but does delete the IAM roles that allowed access to the project. - -# Arguments -- `id`: The ID of the project to be deleted in AWS CodeStar. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"clientRequestToken"`: A user- or system-generated token that identifies the entity that - requested project deletion. This token can be used to repeat the request. -- `"deleteStack"`: Whether to send a delete request for the primary stack in AWS - CloudFormation originally used to generate the project and its resources. This option will - delete all AWS resources for the project (except for any buckets in Amazon S3) as well as - deleting the project itself. Recommended for most use cases. -""" -function delete_project(id; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "DeleteProject", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_project( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "DeleteProject", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - delete_user_profile(user_arn) - delete_user_profile(user_arn, params::Dict{String,<:Any}) - -Deletes a user profile in AWS CodeStar, including all personal preference data associated -with that profile, such as display name and email address. It does not delete the history -of that user, for example the history of commits made by that user. - -# Arguments -- `user_arn`: The Amazon Resource Name (ARN) of the user to delete from AWS CodeStar. 
- -""" -function delete_user_profile(userArn; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "DeleteUserProfile", - Dict{String,Any}("userArn" => userArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_user_profile( - userArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "DeleteUserProfile", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("userArn" => userArn), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - describe_project(id) - describe_project(id, params::Dict{String,<:Any}) - -Describes a project and its resources. - -# Arguments -- `id`: The ID of the project. - -""" -function describe_project(id; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "DescribeProject", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function describe_project( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "DescribeProject", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - describe_user_profile(user_arn) - describe_user_profile(user_arn, params::Dict{String,<:Any}) - -Describes a user in AWS CodeStar and the user attributes across all projects. - -# Arguments -- `user_arn`: The Amazon Resource Name (ARN) of the user. - -""" -function describe_user_profile(userArn; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "DescribeUserProfile", - Dict{String,Any}("userArn" => userArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function describe_user_profile( - userArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "DescribeUserProfile", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("userArn" => userArn), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - disassociate_team_member(project_id, user_arn) - disassociate_team_member(project_id, user_arn, params::Dict{String,<:Any}) - -Removes a user from a project. Removing a user from a project also removes the IAM policies -from that user that allowed access to the project and its resources. Disassociating a team -member does not remove that user's profile from AWS CodeStar. It does not remove the user -from IAM. - -# Arguments -- `project_id`: The ID of the AWS CodeStar project from which you want to remove a team - member. -- `user_arn`: The Amazon Resource Name (ARN) of the IAM user or group whom you want to - remove from the project. 
- -""" -function disassociate_team_member( - projectId, userArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "DisassociateTeamMember", - Dict{String,Any}("projectId" => projectId, "userArn" => userArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function disassociate_team_member( - projectId, - userArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return codestar( - "DisassociateTeamMember", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("projectId" => projectId, "userArn" => userArn), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_projects() - list_projects(params::Dict{String,<:Any}) - -Lists all projects in AWS CodeStar associated with your AWS account. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum amount of data that can be contained in a single set of - results. -- `"nextToken"`: The continuation token to be used to return the next set of results, if - the results cannot be returned in one response. -""" -function list_projects(; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar("ListProjects"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) -end -function list_projects( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "ListProjects", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_resources(project_id) - list_resources(project_id, params::Dict{String,<:Any}) - -Lists resources associated with a project in AWS CodeStar. - -# Arguments -- `project_id`: The ID of the project. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum amount of data that can be contained in a single set of - results. -- `"nextToken"`: The continuation token for the next set of results, if the results cannot - be returned in one response. -""" -function list_resources(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "ListResources", - Dict{String,Any}("projectId" => projectId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_resources( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return codestar( - "ListResources", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("projectId" => projectId), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_tags_for_project(id) - list_tags_for_project(id, params::Dict{String,<:Any}) - -Gets the tags for a project. - -# Arguments -- `id`: The ID of the project to get tags for. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Reserved for future use. -- `"nextToken"`: Reserved for future use. 
-""" -function list_tags_for_project(id; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "ListTagsForProject", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_tags_for_project( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "ListTagsForProject", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_team_members(project_id) - list_team_members(project_id, params::Dict{String,<:Any}) - -Lists all team members associated with a project. - -# Arguments -- `project_id`: The ID of the project for which you want to list team members. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of team members you want returned in a response. -- `"nextToken"`: The continuation token for the next set of results, if the results cannot - be returned in one response. -""" -function list_team_members(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "ListTeamMembers", - Dict{String,Any}("projectId" => projectId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function list_team_members( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return codestar( - "ListTeamMembers", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("projectId" => projectId), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_user_profiles() - list_user_profiles(params::Dict{String,<:Any}) - -Lists all the user profiles configured for your AWS account in AWS CodeStar. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: The maximum number of results to return in a response. -- `"nextToken"`: The continuation token for the next set of results, if the results cannot - be returned in one response. -""" -function list_user_profiles(; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "ListUserProfiles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_user_profiles( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "ListUserProfiles", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - tag_project(id, tags) - tag_project(id, tags, params::Dict{String,<:Any}) - -Adds tags to a project. - -# Arguments -- `id`: The ID of the project you want to add a tag to. -- `tags`: The tags you want to add to the project. - -""" -function tag_project(id, tags; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "TagProject", - Dict{String,Any}("id" => id, "tags" => tags); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function tag_project( - id, - tags, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return codestar( - "TagProject", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("id" => id, "tags" => tags), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - untag_project(id, tags) - untag_project(id, tags, params::Dict{String,<:Any}) - -Removes tags from a project. 
- -# Arguments -- `id`: The ID of the project to remove tags from. -- `tags`: The tags to remove from the project. - -""" -function untag_project(id, tags; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "UntagProject", - Dict{String,Any}("id" => id, "tags" => tags); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function untag_project( - id, - tags, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return codestar( - "UntagProject", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("id" => id, "tags" => tags), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_project(id) - update_project(id, params::Dict{String,<:Any}) - -Updates a project in AWS CodeStar. - -# Arguments -- `id`: The ID of the project you want to update. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"description"`: The description of the project, if any. -- `"name"`: The name of the project you want to update. -""" -function update_project(id; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "UpdateProject", - Dict{String,Any}("id" => id); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_project( - id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "UpdateProject", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("id" => id), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_team_member(project_id, user_arn) - update_team_member(project_id, user_arn, params::Dict{String,<:Any}) - -Updates a team member's attributes in an AWS CodeStar project. For example, you can change -a team member's role in the project, or change whether they have remote access to project -resources. - -# Arguments -- `project_id`: The ID of the project. -- `user_arn`: The Amazon Resource Name (ARN) of the user for whom you want to change team - membership attributes. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"projectRole"`: The role assigned to the user in the project. Project roles have - different levels of access. For more information, see Working with Teams in the AWS - CodeStar User Guide. -- `"remoteAccessAllowed"`: Whether a team member is allowed to remotely access project - resources using the SSH public key associated with the user's profile. Even if this is set - to True, the user must associate a public key with their profile before the user can access - resources. -""" -function update_team_member( - projectId, userArn; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "UpdateTeamMember", - Dict{String,Any}("projectId" => projectId, "userArn" => userArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_team_member( - projectId, - userArn, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return codestar( - "UpdateTeamMember", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}("projectId" => projectId, "userArn" => userArn), - params, - ), - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - update_user_profile(user_arn) - update_user_profile(user_arn, params::Dict{String,<:Any}) - -Updates a user's profile in AWS CodeStar. 
The user profile is not project-specific. -Information in the user profile is displayed wherever the user's information appears to -other users in AWS CodeStar. - -# Arguments -- `user_arn`: The name that will be displayed as the friendly name for the user in AWS - CodeStar. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"displayName"`: The name that is displayed as the friendly name for the user in AWS - CodeStar. -- `"emailAddress"`: The email address that is displayed as part of the user's profile in - AWS CodeStar. -- `"sshPublicKey"`: The SSH public key associated with the user in AWS CodeStar. If a - project owner allows the user remote access to project resources, this public key will be - used along with the user's private key for SSH access. -""" -function update_user_profile(userArn; aws_config::AbstractAWSConfig=global_aws_config()) - return codestar( - "UpdateUserProfile", - Dict{String,Any}("userArn" => userArn); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_user_profile( - userArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return codestar( - "UpdateUserProfile", - Dict{String,Any}(mergewith(_merge, Dict{String,Any}("userArn" => userArn), params)); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end diff --git a/src/services/cognito_identity_provider.jl b/src/services/cognito_identity_provider.jl index 3a04a4a714..cb5306e04a 100644 --- a/src/services/cognito_identity_provider.jl +++ b/src/services/cognito_identity_provider.jl @@ -116,19 +116,16 @@ end admin_confirm_sign_up(user_pool_id, username) admin_confirm_sign_up(user_pool_id, username, params::Dict{String,<:Any}) -This IAM-authenticated API operation provides a code that Amazon Cognito sent to your user -when they signed up in your user pool. After your user enters their code, they confirm -ownership of the email address or phone number that they provided, and their user account -becomes active. Depending on your user pool configuration, your users will receive their -confirmation code in an email or SMS message. Local users who signed up in your user pool -are the only type of user who can confirm sign-up with a code. Users who federate through -an external identity provider (IdP) have already been confirmed by their IdP. -Administrator-created users confirm their accounts when they respond to their invitation -email message and choose a password. Amazon Cognito evaluates Identity and Access -Management (IAM) policies in requests for this API operation. For this operation, you must -use IAM credentials to authorize requests, and you must grant yourself the corresponding -IAM permission in a policy. Learn more Signing Amazon Web Services API Requests -Using the Amazon Cognito user pools API and user pool endpoints +This IAM-authenticated API operation confirms user sign-up as an administrator. Unlike +ConfirmSignUp, your IAM credentials authorize user account confirmation. No confirmation +code is required. This request sets a user account active in a user pool that requires +confirmation of new user accounts before they can sign in. You can configure your user pool +to not send confirmation codes to new users and instead confirm them with this API +operation on the back end. Amazon Cognito evaluates Identity and Access Management (IAM) +policies in requests for this API operation. 
For this operation, you must use IAM +credentials to authorize requests, and you must grant yourself the corresponding IAM +permission in a policy. Learn more Signing Amazon Web Services API Requests Using +the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_pool_id`: The user pool ID for which you want to confirm user registration. @@ -196,17 +193,17 @@ phone number before you can send SMS messages to US phone numbers. If you use SM messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If -you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, -Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox -mode , you can send messages only to verified phone numbers. After you test your app while -in the sandbox environment, you can move out of the sandbox and into production. For more -information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito -Developer Guide. This message is based on a template that you configured in your call to -create or update a user pool. This template includes your custom sign-up instructions and -placeholders for user name and temporary password. Alternatively, you can call -AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito won't -send any email. In either case, the user will be in the FORCE_CHANGE_PASSWORD state until -they sign in and change their password. Amazon Cognito evaluates Identity and Access +you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services +service, Amazon Simple Notification Service might place your account in the SMS sandbox. In + sandbox mode , you can send messages only to verified phone numbers. After you test your +app while in the sandbox environment, you can move out of the sandbox and into production. +For more information, see SMS message settings for Amazon Cognito user pools in the Amazon +Cognito Developer Guide. This message is based on a template that you configured in your +call to create or update a user pool. This template includes your custom sign-up +instructions and placeholders for user name and temporary password. Alternatively, you can +call AdminCreateUser with SUPPRESS for the MessageAction parameter, and Amazon Cognito +won't send any email. In either case, the user will be in the FORCE_CHANGE_PASSWORD state +until they sign in and change their password. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests @@ -764,11 +761,11 @@ SMS text messages in Amazon Cognito, you must register a phone number with Amazo Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any other Amazon -Web Service, Amazon Simple Notification Service might place your account in the SMS -sandbox. In sandbox mode , you can send messages only to verified phone numbers. 
After you -test your app while in the sandbox environment, you can move out of the sandbox and into -production. For more information, see SMS message settings for Amazon Cognito user pools -in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access +Web Services service, Amazon Simple Notification Service might place your account in the +SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After +you test your app while in the sandbox environment, you can move out of the sandbox and +into production. For more information, see SMS message settings for Amazon Cognito user +pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests @@ -1198,19 +1195,19 @@ phone numbers. If you use SMS text messages in Amazon Cognito, you must register number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon -Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place -your account in the SMS sandbox. In sandbox mode , you can send messages only to verified -phone numbers. After you test your app while in the sandbox environment, you can move out -of the sandbox and into production. For more information, see SMS message settings for -Amazon Cognito user pools in the Amazon Cognito Developer Guide. Deactivates a user's -password, requiring them to change it. If a user tries to sign in after the API is called, -Amazon Cognito responds with a PasswordResetRequiredException error. Your app must then -perform the actions that reset your user's password: the forgot-password flow. In addition, -if the user pool has phone verification selected and a verified phone number exists for the -user, or if email verification is selected and a verified email exists for the user, -calling this API will also result in sending a message to the end user with the code to -change their password. Amazon Cognito evaluates Identity and Access Management (IAM) -policies in requests for this API operation. For this operation, you must use IAM +Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might +place your account in the SMS sandbox. In sandbox mode , you can send messages only to +verified phone numbers. After you test your app while in the sandbox environment, you can +move out of the sandbox and into production. For more information, see SMS message +settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Deactivates +a user's password, requiring them to change it. If a user tries to sign in after the API is +called, Amazon Cognito responds with a PasswordResetRequiredException error. Your app must +then perform the actions that reset your user's password: the forgot-password flow. In +addition, if the user pool has phone verification selected and a verified phone number +exists for the user, or if email verification is selected and a verified email exists for +the user, calling this API will also result in sending a message to the end user with the +code to change their password. 
Amazon Cognito evaluates Identity and Access Management +(IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool endpoints @@ -1287,15 +1284,15 @@ If you use SMS text messages in Amazon Cognito, you must register a phone number Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any -other Amazon Web Service, Amazon Simple Notification Service might place your account in -the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. -After you test your app while in the sandbox environment, you can move out of the sandbox -and into production. For more information, see SMS message settings for Amazon Cognito -user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates Identity and -Access Management (IAM) policies in requests for this API operation. For this operation, -you must use IAM credentials to authorize requests, and you must grant yourself the -corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API -Requests Using the Amazon Cognito user pools API and user pool endpoints +other Amazon Web Services service, Amazon Simple Notification Service might place your +account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone +numbers. After you test your app while in the sandbox environment, you can move out of the +sandbox and into production. For more information, see SMS message settings for Amazon +Cognito user pools in the Amazon Cognito Developer Guide. Amazon Cognito evaluates +Identity and Access Management (IAM) policies in requests for this API operation. For this +operation, you must use IAM credentials to authorize requests, and you must grant yourself +the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services +API Requests Using the Amazon Cognito user pools API and user pool endpoints # Arguments - `challenge_name`: The challenge name. For more information, see AdminInitiateAuth. @@ -1741,20 +1738,20 @@ phone numbers. If you use SMS text messages in Amazon Cognito, you must register number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon -Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place -your account in the SMS sandbox. In sandbox mode , you can send messages only to verified -phone numbers. After you test your app while in the sandbox environment, you can move out -of the sandbox and into production. For more information, see SMS message settings for -Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified -user's attributes, including developer attributes, as an administrator. Works on any user. -To delete an attribute from your user, submit the attribute in your API request with a -blank value. For custom attributes, you must prepend the custom: prefix to the attribute -name. 
In addition to updating user attributes, this API can also be used to mark phone and -email as verified. Amazon Cognito evaluates Identity and Access Management (IAM) policies -in requests for this API operation. For this operation, you must use IAM credentials to -authorize requests, and you must grant yourself the corresponding IAM permission in a -policy. Learn more Signing Amazon Web Services API Requests Using the Amazon -Cognito user pools API and user pool endpoints +Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might +place your account in the SMS sandbox. In sandbox mode , you can send messages only to +verified phone numbers. After you test your app while in the sandbox environment, you can +move out of the sandbox and into production. For more information, see SMS message +settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the +specified user's attributes, including developer attributes, as an administrator. Works on +any user. To delete an attribute from your user, submit the attribute in your API request +with a blank value. For custom attributes, you must prepend the custom: prefix to the +attribute name. In addition to updating user attributes, this API can also be used to mark +phone and email as verified. Amazon Cognito evaluates Identity and Access Management (IAM) +policies in requests for this API operation. For this operation, you must use IAM +credentials to authorize requests, and you must grant yourself the corresponding IAM +permission in a policy. Learn more Signing Amazon Web Services API Requests Using +the Amazon Cognito user pools API and user pool endpoints # Arguments - `user_attributes`: An array of name-value pairs representing user attributes. For custom @@ -1904,7 +1901,7 @@ token, or a session string from a challenge response that you received from Amaz VerifySoftwareToken API request. If you don't verify the software token and your user pool doesn't require MFA, the user can then authenticate with user name and password credentials alone. If your user pool requires TOTP MFA, Amazon Cognito generates an MFA_SETUP or -SOFTWARE_TOKEN_SETUP challenge each time your user signs. Complete setup with +SOFTWARE_TOKEN_SETUP challenge each time your user signs in. Complete setup with AssociateSoftwareToken and VerifySoftwareToken. After you set up software token MFA for your user, Amazon Cognito generates a SOFTWARE_TOKEN_MFA challenge when they authenticate. Respond to this challenge with your user's TOTP. Amazon Cognito doesn't evaluate Identity @@ -2567,17 +2564,18 @@ phone numbers. If you use SMS text messages in Amazon Cognito, you must register number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon -Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place -your account in the SMS sandbox. In sandbox mode , you can send messages only to verified -phone numbers. After you test your app while in the sandbox environment, you can move out -of the sandbox and into production. For more information, see SMS message settings for -Amazon Cognito user pools in the Amazon Cognito Developer Guide. Creates a new Amazon -Cognito user pool and sets the password policy for the pool. 
If you don't provide a value -for an attribute, Amazon Cognito sets it to its default value. Amazon Cognito evaluates -Identity and Access Management (IAM) policies in requests for this API operation. For this -operation, you must use IAM credentials to authorize requests, and you must grant yourself -the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services -API Requests Using the Amazon Cognito user pools API and user pool endpoints +Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might +place your account in the SMS sandbox. In sandbox mode , you can send messages only to +verified phone numbers. After you test your app while in the sandbox environment, you can +move out of the sandbox and into production. For more information, see SMS message +settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Creates a +new Amazon Cognito user pool and sets the password policy for the pool. If you don't +provide a value for an attribute, Amazon Cognito sets it to its default value. Amazon +Cognito evaluates Identity and Access Management (IAM) policies in requests for this API +operation. For this operation, you must use IAM credentials to authorize requests, and you +must grant yourself the corresponding IAM permission in a policy. Learn more Signing +Amazon Web Services API Requests Using the Amazon Cognito user pools API and user pool +endpoints # Arguments - `pool_name`: A string used to name the user pool. @@ -2792,7 +2790,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior - of Amazon Cognito where user existence related errors aren't prevented. + of Amazon Cognito where user existence related errors aren't prevented. Defaults to + LEGACY when you don't provide a value. - `"ReadAttributes"`: The list of user attributes that you want your app client to have read-only access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An @@ -3605,10 +3604,10 @@ number before you can send SMS messages to US phone numbers. If you use SMS text in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have -never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon -Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , -you can send messages only to verified phone numbers. After you test your app while in the -sandbox environment, you can move out of the sandbox and into production. For more +never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, +Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox +mode , you can send messages only to verified phone numbers. After you test your app while +in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. 
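The Cognito hunks in this file only reword docstrings; the call signatures that AWS.jl generates are unchanged. As a minimal sketch of how these two-method wrappers (a positional-arguments method plus a `params::Dict` method) are invoked — assuming AWS.jl's `@service` entry point, a hypothetical user pool ID, and the `MessageAction => SUPPRESS` option described in the AdminCreateUser documentation above — not part of the patch itself:

```julia
using AWS: @service
@service Cognito_Identity_Provider

# Hypothetical identifiers, for illustration only.
pool_id = "us-east-1_EXAMPLE"

# Positional method: required arguments become the request body,
# e.g. the logging-configuration getter documented in the next hunk.
cfg = Cognito_Identity_Provider.get_log_delivery_configuration(pool_id)

# params-Dict method: optional inputs are merged with the required ones,
# mirroring the mergewith(_merge, ...) pattern in the generated functions.
Cognito_Identity_Provider.admin_create_user(
    pool_id,
    "example-user",
    Dict("MessageAction" => "SUPPRESS"),  # suppress the invitation message
)
```

Either method also accepts an `aws_config` keyword, defaulting to `global_aws_config()` as seen throughout this diff.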
@@ -3839,11 +3838,11 @@ end get_log_delivery_configuration(user_pool_id) get_log_delivery_configuration(user_pool_id, params::Dict{String,<:Any}) -Gets the detailed activity logging configuration for a user pool. +Gets the logging configuration of a user pool. # Arguments -- `user_pool_id`: The ID of the user pool where you want to view detailed activity logging - configuration. +- `user_pool_id`: The ID of the user pool that has the logging configuration that you want + to view. """ function get_log_delivery_configuration( @@ -4005,12 +4004,12 @@ phone number before you can send SMS messages to US phone numbers. If you use SM messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If -you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, -Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox -mode , you can send messages only to verified phone numbers. After you test your app while -in the sandbox environment, you can move out of the sandbox and into production. For more -information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito -Developer Guide. +you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services +service, Amazon Simple Notification Service might place your account in the SMS sandbox. In + sandbox mode , you can send messages only to verified phone numbers. After you test your +app while in the sandbox environment, you can move out of the sandbox and into production. +For more information, see SMS message settings for Amazon Cognito user pools in the Amazon +Cognito Developer Guide. # Arguments - `access_token`: A non-expired access token for the user whose attribute verification code @@ -4170,10 +4169,10 @@ number before you can send SMS messages to US phone numbers. If you use SMS text in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have -never used SMS text messages with Amazon Cognito or any other Amazon Web Service, Amazon -Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , -you can send messages only to verified phone numbers. After you test your app while in the -sandbox environment, you can move out of the sandbox and into production. For more +never used SMS text messages with Amazon Cognito or any other Amazon Web Services service, +Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox +mode , you can send messages only to verified phone numbers. After you test your app while +in the sandbox environment, you can move out of the sandbox and into production. For more information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. @@ -4762,7 +4761,7 @@ can send SMS messages to US phone numbers. If you use SMS text messages in Amazo you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. 
If you have never used SMS -text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple +text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more @@ -4852,11 +4851,11 @@ phone numbers. If you use SMS text messages in Amazon Cognito, you must register number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon -Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place -your account in the SMS sandbox. In sandbox mode , you can send messages only to verified -phone numbers. After you test your app while in the sandbox environment, you can move out -of the sandbox and into production. For more information, see SMS message settings for -Amazon Cognito user pools in the Amazon Cognito Developer Guide. +Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might +place your account in the SMS sandbox. In sandbox mode , you can send messages only to +verified phone numbers. After you test your app while in the sandbox environment, you can +move out of the sandbox and into production. For more information, see SMS message +settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. # Arguments - `challenge_name`: The challenge name. For more information, see InitiateAuth. @@ -5009,13 +5008,12 @@ end set_log_delivery_configuration(log_configurations, user_pool_id) set_log_delivery_configuration(log_configurations, user_pool_id, params::Dict{String,<:Any}) -Sets up or modifies the detailed activity logging configuration of a user pool. +Sets up or modifies the logging configuration of a user pool. User pools can export user +notification logs and advanced security features user activity logs. # Arguments -- `log_configurations`: A collection of all of the detailed activity logging configurations - for a user pool. -- `user_pool_id`: The ID of the user pool where you want to configure detailed activity - logging . +- `log_configurations`: A collection of the logging configurations for a user pool. +- `user_pool_id`: The ID of the user pool where you want to configure logging. """ function set_log_delivery_configuration( @@ -5211,11 +5209,11 @@ If you use SMS text messages in Amazon Cognito, you must register a phone number Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon Cognito or any -other Amazon Web Service, Amazon Simple Notification Service might place your account in -the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. -After you test your app while in the sandbox environment, you can move out of the sandbox -and into production. For more information, see SMS message settings for Amazon Cognito -user pools in the Amazon Cognito Developer Guide. +other Amazon Web Services service, Amazon Simple Notification Service might place your +account in the SMS sandbox. 
In sandbox mode , you can send messages only to verified phone +numbers. After you test your app while in the sandbox environment, you can move out of the +sandbox and into production. For more information, see SMS message settings for Amazon +Cognito user pools in the Amazon Cognito Developer Guide. # Arguments - `user_pool_id`: The user pool ID. @@ -5321,7 +5319,7 @@ can send SMS messages to US phone numbers. If you use SMS text messages in Amazo you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS -text messages with Amazon Cognito or any other Amazon Web Service, Amazon Simple +text messages with Amazon Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox mode , you can send messages only to verified phone numbers. After you test your app while in the sandbox environment, you can move out of the sandbox and into production. For more @@ -5959,12 +5957,12 @@ phone number before you can send SMS messages to US phone numbers. If you use SM messages in Amazon Cognito, you must register a phone number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If -you have never used SMS text messages with Amazon Cognito or any other Amazon Web Service, -Amazon Simple Notification Service might place your account in the SMS sandbox. In sandbox -mode , you can send messages only to verified phone numbers. After you test your app while -in the sandbox environment, you can move out of the sandbox and into production. For more -information, see SMS message settings for Amazon Cognito user pools in the Amazon Cognito -Developer Guide. +you have never used SMS text messages with Amazon Cognito or any other Amazon Web Services +service, Amazon Simple Notification Service might place your account in the SMS sandbox. In + sandbox mode , you can send messages only to verified phone numbers. After you test your +app while in the sandbox environment, you can move out of the sandbox and into production. +For more information, see SMS message settings for Amazon Cognito user pools in the Amazon +Cognito Developer Guide. # Arguments - `access_token`: A valid access token that Amazon Cognito issued to the user whose user @@ -6037,14 +6035,14 @@ phone numbers. If you use SMS text messages in Amazon Cognito, you must register number with Amazon Pinpoint. Amazon Cognito uses the registered number automatically. Otherwise, Amazon Cognito users who must receive SMS messages might not be able to sign up, activate their accounts, or sign in. If you have never used SMS text messages with Amazon -Cognito or any other Amazon Web Service, Amazon Simple Notification Service might place -your account in the SMS sandbox. In sandbox mode , you can send messages only to verified -phone numbers. After you test your app while in the sandbox environment, you can move out -of the sandbox and into production. For more information, see SMS message settings for -Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the specified -user pool with the specified attributes. You can get a list of the current user pool -settings using DescribeUserPool. 
If you don't provide a value for an attribute, Amazon -Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access +Cognito or any other Amazon Web Services service, Amazon Simple Notification Service might +place your account in the SMS sandbox. In sandbox mode , you can send messages only to +verified phone numbers. After you test your app while in the sandbox environment, you can +move out of the sandbox and into production. For more information, see SMS message +settings for Amazon Cognito user pools in the Amazon Cognito Developer Guide. Updates the +specified user pool with the specified attributes. You can get a list of the current user +pool settings using DescribeUserPool. If you don't provide a value for an attribute, +Amazon Cognito sets it to its default value. Amazon Cognito evaluates Identity and Access Management (IAM) policies in requests for this API operation. For this operation, you must use IAM credentials to authorize requests, and you must grant yourself the corresponding IAM permission in a policy. Learn more Signing Amazon Web Services API Requests @@ -6243,7 +6241,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a simulated destination. When set to LEGACY, those APIs return a UserNotFoundException exception if the user doesn't exist in the user pool. Valid values include: ENABLED - This prevents user existence-related errors. LEGACY - This represents the early behavior - of Amazon Cognito where user existence related errors aren't prevented. + of Amazon Cognito where user existence related errors aren't prevented. Defaults to + LEGACY when you don't provide a value. - `"ReadAttributes"`: The list of user attributes that you want your app client to have read-only access to. After your user authenticates in your app, their access token authorizes them to read their own attribute value for any attribute in this list. An diff --git a/src/services/compute_optimizer.jl b/src/services/compute_optimizer.jl index fa7d9b491b..9375fc131f 100644 --- a/src/services/compute_optimizer.jl +++ b/src/services/compute_optimizer.jl @@ -1356,8 +1356,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Inferred workload types in the Compute Optimizer User Guide. - `"lookBackPeriod"`: The preference to control the number of days the utilization metrics of the Amazon Web Services resource are analyzed. When this preference isn't specified, we - use the default value DAYS_14. You can only set this preference for the Amazon EC2 - instance and Auto Scaling group resource types. + use the default value DAYS_14. You can only set this preference for the Amazon EC2 + instance and Auto Scaling group resource types. Amazon EC2 instance lookback + preferences can be set at the organization, account, and resource levels. Auto Scaling + group lookback preferences can only be set at the resource level. - `"preferredResources"`: The preference to control which resource type values are considered when generating rightsizing recommendations. You can specify this preference as a combination of include and exclude lists. You must specify either an includeList or diff --git a/src/services/connect.jl b/src/services/connect.jl index 23a305e19b..623f01f4ef 100644 --- a/src/services/connect.jl +++ b/src/services/connect.jl @@ -585,7 +585,7 @@ security key to the instance. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. 
You can find the instance ID in the Amazon Resource Name (ARN) of the instance. -- `key`: A valid security key in PEM format. +- `key`: A valid security key in PEM format as a String. """ function associate_security_key( @@ -1048,7 +1048,7 @@ provided in the StartAttachedFileUpload API. # Arguments - `file_id`: The unique identifier of the attached file resource. -- `instance_id`: The unique identifier of the Connect instance. +- `instance_id`: The unique identifier of the Amazon Connect instance. - `associated_resource_arn`: The resource to which the attached file is (being) uploaded to. Cases are the only current supported resource. This value must be a valid ARN. @@ -1381,11 +1381,12 @@ end This API is in preview release for Amazon Connect and is subject to change. Initiates an Amazon Connect instance with all the supported channels enabled. It does not attach any storage, such as Amazon Simple Storage Service (Amazon S3) or Amazon Kinesis. It also does -not allow for any configurations on features, such as Contact Lens for Amazon Connect. -Amazon Connect enforces a limit on the total number of instances that you can create or -delete in 30 days. If you exceed this limit, you will get an error message indicating there -has been an excessive number of attempts at creating or deleting instances. You must wait -30 days before you can restart creating and deleting instances in your account. +not allow for any configurations on features, such as Contact Lens for Amazon Connect. For +more information, see Create an Amazon Connect instance in the Amazon Connect Administrator +Guide. Amazon Connect enforces a limit on the total number of instances that you can create +or delete in 30 days. If you exceed this limit, you will get an error message indicating +there has been an excessive number of attempts at creating or deleting instances. You must +wait 30 days before you can restart creating and deleting instances in your account. # Arguments - `identity_management_type`: The type of identity management for your Amazon Connect users. @@ -1670,7 +1671,10 @@ end create_predefined_attribute(instance_id, name, values) create_predefined_attribute(instance_id, name, values, params::Dict{String,<:Any}) -Creates a new predefined attribute for the specified Amazon Connect instance. +Creates a new predefined attribute for the specified Amazon Connect instance. Predefined +attributes are attributes in an Amazon Connect instance that can be used to route contacts +to an agent or pools of agents within a queue. For more information, see Create predefined +attributes for routing contacts to agents. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -2040,7 +2044,9 @@ end create_security_profile(instance_id, security_profile_name) create_security_profile(instance_id, security_profile_name, params::Dict{String,<:Any}) -Creates a security profile. +Creates a security profile. For information about security profiles, see Security Profiles +in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface +name of the security profile permissions, see List of security profile permissions. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -2892,11 +2898,12 @@ end delete_instance(instance_id, params::Dict{String,<:Any}) This API is in preview release for Amazon Connect and is subject to change. Deletes the -Amazon Connect instance. 
Amazon Connect enforces a limit on the total number of instances -that you can create or delete in 30 days. If you exceed this limit, you will get an error -message indicating there has been an excessive number of attempts at creating or deleting -instances. You must wait 30 days before you can restart creating and deleting instances in -your account. +Amazon Connect instance. For more information, see Delete your Amazon Connect instance in +the Amazon Connect Administrator Guide. Amazon Connect enforces a limit on the total number +of instances that you can create or delete in 30 days. If you exceed this limit, you will +get an error message indicating there has been an excessive number of attempts at creating +or deleting instances. You must wait 30 days before you can restart creating and deleting +instances in your account. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -3041,7 +3048,8 @@ end delete_queue(instance_id, queue_id) delete_queue(instance_id, queue_id, params::Dict{String,<:Any}) -Deletes a queue. +Deletes a queue. It isn't possible to delete a queue by using the Amazon Connect admin +website. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -3574,6 +3582,45 @@ function describe_agent_status( ) end +""" + describe_authentication_profile(authentication_profile_id, instance_id) + describe_authentication_profile(authentication_profile_id, instance_id, params::Dict{String,<:Any}) + +This API is in preview release for Amazon Connect and is subject to change. To request +access to this API, contact Amazon Web Services Support. Describes the target +authentication profile. + +# Arguments +- `authentication_profile_id`: A unique identifier for the authentication profile. +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance + ID in the Amazon Resource Name (ARN) of the instance. + +""" +function describe_authentication_profile( + AuthenticationProfileId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_authentication_profile( + AuthenticationProfileId, + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_contact(contact_id, instance_id) describe_contact(contact_id, instance_id, params::Dict{String,<:Any}) @@ -3981,7 +4028,10 @@ end describe_predefined_attribute(instance_id, name) describe_predefined_attribute(instance_id, name, params::Dict{String,<:Any}) -Describes a predefined attribute for the specified Amazon Connect instance. +Describes a predefined attribute for the specified Amazon Connect instance. Predefined +attributes are attributes in an Amazon Connect instance that can be used to route contacts +to an agent or pools of agents within a queue. For more information, see Create predefined +attributes for routing contacts to agents. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. 
You can find the instance @@ -4204,7 +4254,10 @@ end describe_security_profile(instance_id, security_profile_id) describe_security_profile(instance_id, security_profile_id, params::Dict{String,<:Any}) -Gets basic information about the security profle. +Gets basic information about the security profile. For information about security profiles, +see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of the API +name and user interface name of the security profile permissions, see List of security +profile permissions. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -5528,32 +5581,35 @@ definitions in the Amazon Connect Administrator Guide. interval for the retrieval of historical metrics data. The time must be later than the start time timestamp. It cannot be later than the current timestamp. - `filters`: The filters to apply to returned metrics. You can filter on the following - resources: Agents Channels Feature Queues Routing profiles Routing step - expression User hierarchy groups At least one filter must be passed from queues, - routing profiles, agents, or user hierarchy groups. To filter by phone number, see Create a - historical metrics report in the Amazon Connect Administrator Guide. Note the following - limits: Filter keys: A maximum of 5 filter keys are supported in a single request. Valid - filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | - AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | + resources: Agents Campaigns Channels Feature Queues Routing profiles Routing + step expression User hierarchy groups At least one filter must be passed from queues, + routing profiles, agents, or user hierarchy groups. For metrics for outbound campaigns + analytics, you can also use campaigns to satisfy at least one filter requirement. To filter + by phone number, see Create a historical metrics report in the Amazon Connect Administrator + Guide. Note the following limits: Filter keys: A maximum of 5 filter keys are supported + in a single request. Valid filter keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | + AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE | AGENT_HIERARCHY_LEVEL_FOUR | + AGENT_HIERARCHY_LEVEL_FIVE | ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL | contact/segmentAttributes/connect:Subtype | - FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | FLOWS_NEXT_RESOURCE_QUEUE_ID | - FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | RESOURCE_PUBLISHED_TIMESTAMP | - ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | Q_CONNECT_ENABLED | Filter values: - A maximum of 100 filter values are supported in a single request. VOICE, CHAT, and TASK are - valid filterValue for the CHANNEL filter key. They do not count towards limitation of 100 - filter values. For example, a GetMetricDataV2 request can filter by 50 queues, 35 agents, - and 15 routing profiles for a total of 100 filter values, along with 3 channel filters. - contact_lens_conversational_analytics is a valid filterValue for the FEATURE filter key. It - is available only to contacts analyzed by Contact Lens conversational analytics. - connect:Chat, connect:SMS, connect:Telephony, and connect:WebRTC are valid filterValue - examples (not exhaustive) for the contact/segmentAttributes/connect:Subtype filter key. - ROUTING_STEP_EXPRESSION is a valid filter key with a filter value up to 3000 length. 
This - filter is case and order sensitive. JSON string fields must be sorted in ascending order - and JSON array order should be kept as is. Q_CONNECT_ENABLED. TRUE and FALSE are the only - valid filterValues for the Q_CONNECT_ENABLED filter key. TRUE includes all contacts that - had Amazon Q in Connect enabled as part of the flow. FALSE includes all contacts that did - not have Amazon Q in Connect enabled as part of the flow This filter is available only - for contact record-driven metrics. + DISCONNECT_REASON | FEATURE | FLOW_TYPE | FLOWS_NEXT_RESOURCE_ID | + FLOWS_NEXT_RESOURCE_QUEUE_ID | FLOWS_OUTCOME_TYPE | FLOWS_RESOURCE_ID | INITIATION_METHOD | + RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE | ROUTING_STEP_EXPRESSION | QUEUE | + Q_CONNECT_ENABLED | Filter values: A maximum of 100 filter values are supported in a + single request. VOICE, CHAT, and TASK are valid filterValue for the CHANNEL filter key. + They do not count towards limitation of 100 filter values. For example, a GetMetricDataV2 + request can filter by 50 queues, 35 agents, and 15 routing profiles for a total of 100 + filter values, along with 3 channel filters. contact_lens_conversational_analytics is a + valid filterValue for the FEATURE filter key. It is available only to contacts analyzed by + Contact Lens conversational analytics. connect:Chat, connect:SMS, connect:Telephony, and + connect:WebRTC are valid filterValue examples (not exhaustive) for the + contact/segmentAttributes/connect:Subtype filter key. ROUTING_STEP_EXPRESSION is a valid + filter key with a filter value up to 3000 length. This filter is case and order sensitive. + JSON string fields must be sorted in ascending order and JSON array order should be kept as + is. Q_CONNECT_ENABLED. TRUE and FALSE are the only valid filterValues for the + Q_CONNECT_ENABLED filter key. TRUE includes all contacts that had Amazon Q in Connect + enabled as part of the flow. FALSE includes all contacts that did not have Amazon Q in + Connect enabled as part of the flow This filter is available only for contact + record-driven metrics. Campaign ARNs are valid filterValues for the CAMPAIGN filter key. - `metrics`: The metrics to retrieve. Specify the name, groupings, and filters for each metric. The following historical metrics are available. For a description of each metric, see Historical metrics definitions in the Amazon Connect Administrator Guide. @@ -5603,10 +5659,13 @@ definitions in the Amazon Connect Administrator Guide. Feature is a valid filter but not a valid grouping. AVG_CONVERSATION_DURATION Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: - Average conversation duration AVG_FLOW_TIME Unit: Seconds Valid groupings and filters: - Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, - Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource - ID, Initiation method, Resource published timestamp UI name: Average flow time + Average conversation duration AVG_DIALS_PER_MINUTE This metric is available only for + contacts analyzed by outbound campaigns analytics. 
Unit: Count Valid groupings and filters: + Campaign, Agent, Queue, Routing Profile UI name: Average dials per minute AVG_FLOW_TIME + Unit: Seconds Valid groupings and filters: Channel, + contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next + resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, + Initiation method, Resource published timestamp UI name: Average flow time AVG_GREETING_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in @@ -5655,26 +5714,25 @@ definitions in the Amazon Connect Administrator Guide. AVG_TALK_TIME_CUSTOMER This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in - Connect UI name: Average customer talk time CASES_CREATED Unit: Count Required filter - key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: - Cases created CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| - Incoming | Outbound | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings - and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, - contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: - Contact abandoned CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts - abandoned in X seconds CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts answered - in X seconds CONTACTS_CREATED Unit: Count Valid metric filter key: INITIATION_METHOD - Valid groupings and filters: Queue, Channel, Routing Profile, Feature, - contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts created Feature - is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: Count Valid metric - filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and filters: Queue, - Channel, Routing Profile, Agent, Agent Hierarchy, Feature, + Connect UI name: Average customer talk time AVG_WAIT_TIME_AFTER_CUSTOMER_CONNECTION This + metric is available only for contacts analyzed by outbound campaigns analytics. Unit: + Seconds Valid groupings and filters: Campaign UI name: Average wait time after customer + connection CAMPAIGN_CONTACTS_ABANDONED_AFTER_X This metric is available only for + contacts analyzed by outbound campaigns analytics. Unit: Count Valid groupings and filters: + Campaign, Agent Threshold: For ThresholdValue, enter any whole number from 1 to 604800 + (inclusive), in seconds. For Comparison, you must enter GT (for Greater than). UI name: + Campaign contacts abandoned after X CAMPAIGN_CONTACTS_ABANDONED_AFTER_X_RATE This metric + is available only for contacts analyzed by outbound campaigns analytics. 
Unit: Percent + Valid groupings and filters: Campaign, Agent Threshold: For ThresholdValue, enter any + whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter GT + (for Greater than). UI name: Campaign contacts abandoned after X rate CASES_CREATED + Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: + CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases created CONTACTS_CREATED Unit: Count Valid + metric filter key: INITIATION_METHOD Valid groupings and filters: Queue, Channel, Routing + Profile, Feature, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts + created Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED Unit: + Count Valid metric filter key: INITIATION_METHOD, DISCONNECT_REASON Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: API contacts handled Feature is a valid filter but not a valid grouping. CONTACTS_HANDLED_BY_CONNECTED_TO_AGENT Unit: Count Valid metric filter key: @@ -5698,53 +5756,72 @@ definitions in the Amazon Connect Administrator Guide. contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts queued CONTACTS_QUEUED_BY_ENQUEUE Unit: Count Valid groupings and filters: Queue, Channel, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype UI name: Contacts queued - (enqueue timestamp) CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: - Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect - Threshold: For ThresholdValue enter any whole number from 1 to 604800 (inclusive), in - seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts resolved - in X CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, Feature, + (enqueue timestamp) CONTACTS_REMOVED_FROM_QUEUE_IN_X Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter + any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter + LT (for \"Less than\"). UI name: Contacts removed from queue in X seconds + CONTACTS_RESOLVED_IN_X Unit: Count Valid groupings and filters: Queue, Channel, Routing + Profile, contact/segmentAttributes/connect:Subtype, Q in Connect Threshold: For + ThresholdValue enter any whole number from 1 to 604800 (inclusive), in seconds. For + Comparison, you must enter LT (for \"Less than\"). UI name: Contacts resolved in X + CONTACTS_TRANSFERRED_OUT Unit: Count Valid groupings and filters: Queue, Channel, Routing + Profile, Agent, Agent Hierarchy, Feature, contact/segmentAttributes/connect:Subtype, Q in + Connect UI name: Contacts transferred out Feature is a valid filter but not a valid + grouping. CONTACTS_TRANSFERRED_OUT_BY_AGENT Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out - Feature is a valid filter but not a valid grouping. 
CONTACTS_TRANSFERRED_OUT_BY_AGENT - Unit: Count Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts - transferred out by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings - and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + by agent CONTACTS_TRANSFERRED_OUT_FROM_QUEUE Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contacts transferred out queue CURRENT_CASES Unit: Count Required filter key: CASE_TEMPLATE_ARN Valid groupings - and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases FLOWS_OUTCOME Unit: - Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow - type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows - outcome type, Flows resource ID, Initiation method, Resource published timestamp UI name: - Flows outcome FLOWS_STARTED Unit: Count Valid groupings and filters: Channel, + and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Current cases DELIVERY_ATTEMPTS + This metric is available only for contacts analyzed by outbound campaigns analytics. Unit: + Count Valid metric filter key: ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid + groupings and filters: Campaign, Agent, Queue, Routing Profile, Answering Machine Detection + Status, Disconnect Reason UI name: Delivery attempts DELIVERY_ATTEMPT_DISPOSITION_RATE + This metric is available only for contacts analyzed by outbound campaigns analytics, and + with the answering machine detection enabled. Unit: Percent Valid metric filter key: + ANSWERING_MACHINE_DETECTION_STATUS, DISCONNECT_REASON Valid groupings and filters: + Campaign, Agent, Answering Machine Detection Status, Disconnect Reason Answering Machine + Detection Status and Disconnect Reason are valid filters but not valid groupings. UI name: + Delivery attempt disposition rate FLOWS_OUTCOME Unit: Count Valid groupings and filters: + Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, + Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource + ID, Initiation method, Resource published timestamp UI name: Flows outcome FLOWS_STARTED + Unit: Count Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows started - MAX_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, + HUMAN_ANSWERED_CALLS This metric is available only for contacts analyzed by outbound + campaigns analytics, and with the answering machine detection enabled. 
Unit: Count Valid + groupings and filters: Campaign, Agent UI name: Human answered MAX_FLOW_TIME Unit: + Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, + Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, + Flows outcome type, Flows resource ID, Initiation method, Resource published timestamp UI + name: Maximum flow time MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Maximum queued time + MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, - Initiation method, Resource published timestamp UI name: Maximum flow time - MAX_QUEUED_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing - Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI - name: Maximum queued time MIN_FLOW_TIME Unit: Seconds Valid groupings and filters: - Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, - Flows next resource ID, Flows next resource queue ID, Flows outcome type, Flows resource - ID, Initiation method, Resource published timestamp UI name: Minimum flow time + Initiation method, Resource published timestamp UI name: Minimum flow time PERCENT_CASES_FIRST_CONTACT_RESOLVED Unit: Percent Required filter key: CASE_TEMPLATE_ARN Valid groupings and filters: CASE_TEMPLATE_ARN, CASE_STATUS UI name: Cases resolved on first contact PERCENT_CONTACTS_STEP_EXPIRED Unit: Percent Valid groupings and filters: - Queue, RoutingStepExpression UI name: Not available PERCENT_CONTACTS_STEP_JOINED Unit: - Percent Valid groupings and filters: Queue, RoutingStepExpression UI name: Not available - PERCENT_FLOWS_OUTCOME Unit: Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid - groupings and filters: Channel, contact/segmentAttributes/connect:Subtype, Flow type, Flows - module resource ID, Flows next resource ID, Flows next resource queue ID, Flows outcome - type, Flows resource ID, Initiation method, Resource published timestamp UI name: Flows - outcome percentage. The FLOWS_OUTCOME_TYPE is not a valid grouping. - PERCENT_NON_TALK_TIME This metric is available only for contacts analyzed by Contact Lens - conversational analytics. Unit: Percentage Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in - Connect UI name: Non-talk time percent PERCENT_TALK_TIME This metric is available only - for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid - groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + Queue, RoutingStepExpression UI name: This metric is available in Real-time Metrics UI but + not on the Historical Metrics UI. PERCENT_CONTACTS_STEP_JOINED Unit: Percent Valid + groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in + Real-time Metrics UI but not on the Historical Metrics UI. 
PERCENT_FLOWS_OUTCOME Unit: + Percent Valid metric filter key: FLOWS_OUTCOME_TYPE Valid groupings and filters: Channel, + contact/segmentAttributes/connect:Subtype, Flow type, Flows module resource ID, Flows next + resource ID, Flows next resource queue ID, Flows outcome type, Flows resource ID, + Initiation method, Resource published timestamp UI name: Flows outcome percentage. The + FLOWS_OUTCOME_TYPE is not a valid grouping. PERCENT_NON_TALK_TIME This metric is + available only for contacts analyzed by Contact Lens conversational analytics. Unit: + Percentage Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Non-talk time + percent PERCENT_TALK_TIME This metric is available only for contacts analyzed by Contact + Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Talk time percent PERCENT_TALK_TIME_AGENT This metric is available only for contacts analyzed by Contact Lens conversational analytics. Unit: Percentage Valid groupings and filters: Queue, @@ -5762,27 +5839,40 @@ definitions in the Amazon Connect Administrator Guide. Channel, Routing Profile, Q in Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Service level X STEP_CONTACTS_QUEUED Unit: Count Valid - groupings and filters: Queue, RoutingStepExpression UI name: Not available - SUM_AFTER_CONTACT_WORK_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: After contact work time - SUM_CONNECTING_TIME_AGENT Unit: Seconds Valid metric filter key: INITIATION_METHOD. This - metric only supports the following filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | - CALLBACK | API Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy UI name: Agent API connecting time The Negate key in Metric Level Filters is - not applicable for this metric. SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and - filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: - Contact flow time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: - Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent on contact time - SUM_CONTACTS_DISCONNECTED Valid metric filter key: DISCONNECT_REASON Unit: Count Valid - groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, + groupings and filters: Queue, RoutingStepExpression UI name: This metric is available in + Real-time Metrics UI but not on the Historical Metrics UI. SUM_AFTER_CONTACT_WORK_TIME + Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent + Hierarchy, Q in Connect UI name: After contact work time SUM_CONNECTING_TIME_AGENT Unit: + Seconds Valid metric filter key: INITIATION_METHOD. This metric only supports the following + filter keys as INITIATION_METHOD: INBOUND | OUTBOUND | CALLBACK | API Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy UI name: Agent API + connecting time The Negate key in Metric Level Filters is not applicable for this metric. 
+ CONTACTS_ABANDONED Unit: Count Metric filter: Valid values: API| Incoming | Outbound + | Transfer | Callback | Queue_Transfer| Disconnect Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, + contact/segmentAttributes/connect:Subtype, RoutingStepExpression, Q in Connect UI name: + Contact abandoned SUM_CONTACTS_ABANDONED_IN_X Unit: Count Valid groupings and filters: + Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in Connect + Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), in + seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts + abandoned in X seconds SUM_CONTACTS_ANSWERED_IN_X Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, contact/segmentAttributes/connect:Subtype, Q in + Connect Threshold: For ThresholdValue, enter any whole number from 1 to 604800 (inclusive), + in seconds. For Comparison, you must enter LT (for \"Less than\"). UI name: Contacts + answered in X seconds SUM_CONTACT_FLOW_TIME Unit: Seconds Valid groupings and filters: + Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Contact flow + time SUM_CONTACT_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, + Agent, Agent Hierarchy UI name: Agent on contact time SUM_CONTACTS_DISCONNECTED Valid + metric filter key: DISCONNECT_REASON Unit: Count Valid groupings and filters: Queue, + Channel, Routing Profile, Agent, Agent Hierarchy, contact/segmentAttributes/connect:Subtype, Q in Connect UI name: Contact disconnected - SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Queue, Channel, - Routing Profile, Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: - Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent - Hierarchy, Q in Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid + SUM_ERROR_STATUS_TIME_AGENT Unit: Seconds Valid groupings and filters: Routing Profile, + Agent, Agent Hierarchy UI name: Error status time SUM_HANDLE_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in - Connect UI name: Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings - and filters: Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time + Connect UI name: Contact handle time SUM_HOLD_TIME Unit: Count Valid groupings and + filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: + Customer hold time SUM_IDLE_TIME_AGENT Unit: Seconds Valid groupings and filters: + Routing Profile, Agent, Agent Hierarchy UI name: Agent idle time SUM_INTERACTION_AND_HOLD_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, Routing Profile, Agent, Agent Hierarchy, Q in Connect UI name: Agent interaction and hold time SUM_INTERACTION_TIME Unit: Seconds Valid groupings and filters: Queue, Channel, @@ -5808,8 +5898,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are returned apply to the metrics for each queue. They are not aggregated for all queues. If no grouping is specified, a summary of all metrics is returned. 
Valid grouping keys: AGENT | AGENT_HIERARCHY_LEVEL_ONE | AGENT_HIERARCHY_LEVEL_TWO | AGENT_HIERARCHY_LEVEL_THREE
- | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE | CASE_TEMPLATE_ARN | CASE_STATUS
- | CHANNEL | contact/segmentAttributes/connect:Subtype | FLOWS_RESOURCE_ID |
+ | AGENT_HIERARCHY_LEVEL_FOUR | AGENT_HIERARCHY_LEVEL_FIVE |
+ ANSWERING_MACHINE_DETECTION_STATUS | CAMPAIGN | CASE_TEMPLATE_ARN | CASE_STATUS | CHANNEL |
+ contact/segmentAttributes/connect:Subtype | DISCONNECT_REASON | FLOWS_RESOURCE_ID |
 FLOWS_MODULE_RESOURCE_ID | FLOW_TYPE | FLOWS_OUTCOME_TYPE | INITIATION_METHOD |
 Q_CONNECT_ENABLED | QUEUE | RESOURCE_PUBLISHED_TIMESTAMP | ROUTING_PROFILE |
 ROUTING_STEP_EXPRESSION
@@ -5999,7 +6090,20 @@ end

Imports a claimed phone number from an external service, such as Amazon Pinpoint, into an
Amazon Connect instance. You can call this API only in the same Amazon Web Services Region
-where the Amazon Connect instance was created.
+where the Amazon Connect instance was created. Call the DescribePhoneNumber API to verify
+the status of a previous ImportPhoneNumber operation. If you plan to claim or import
+numbers and then release them frequently, contact us for a service quota exception.
+Otherwise, you may be blocked from claiming and releasing any more numbers until 180 days
+past the oldest number released has expired. By default you can claim or import and then
+release up to 200% of your maximum number of active phone numbers. If you claim or import
+and then release phone numbers using the UI or API during a rolling 180 day cycle that
+exceeds 200% of your phone number service level quota, you will be blocked from claiming
+or importing any more numbers until 180 days past the oldest number released has expired.
+For example, if you already have 99 claimed or imported numbers and a service level quota
+of 99 phone numbers, and in any 180 day period you release 99, claim 99, and then release
+99, you will have exceeded the 200% limit. At that point you are blocked from claiming any
+more numbers until you open an Amazon Web Services Support ticket.

# Arguments
- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance
@@ -6180,6 +6284,48 @@ function list_approved_origins(
     )
 end

+"""
+    list_authentication_profiles(instance_id)
+    list_authentication_profiles(instance_id, params::Dict{String,<:Any})
+
+This API is in preview release for Amazon Connect and is subject to change. To request
+access to this API, contact Amazon Web Services Support. Provides summary information about
+the authentication profiles in a specified Amazon Connect instance.
+
+# Arguments
+- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance
+  ID in the Amazon Resource Name (ARN) of the instance.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of results to return per page.
+- `"nextToken"`: The token for the next set of results. Use the value returned in the
+  previous response in the next request to retrieve the next set of results.
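+
+# Example
+A minimal, illustrative sketch using AWS.jl's high-level `@service` interface; the
+instance ID below is hypothetical:
+
+    using AWS
+    @service Connect
+
+    # Fetch the first page of authentication profile summaries, 10 per page.
+    Connect.list_authentication_profiles(
+        "12345678-aaaa-bbbb-cccc-123456789012", Dict("maxResults" => 10)
+    )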
+""" +function list_authentication_profiles( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "GET", + "/authentication-profiles-summary/$(InstanceId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_authentication_profiles( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "GET", + "/authentication-profiles-summary/$(InstanceId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_bots(instance_id, lex_version) list_bots(instance_id, lex_version, params::Dict{String,<:Any}) @@ -6961,7 +7107,10 @@ end list_predefined_attributes(instance_id) list_predefined_attributes(instance_id, params::Dict{String,<:Any}) -Lists predefined attributes for the specified Amazon Connect instance. +Lists predefined attributes for the specified Amazon Connect instance. Predefined +attributes are attributes in an Amazon Connect instance that can be used to route contacts +to an agent or pools of agents within a queue. For more information, see Create predefined +attributes for routing contacts to agents. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -7437,7 +7586,10 @@ end list_security_profile_permissions(instance_id, security_profile_id) list_security_profile_permissions(instance_id, security_profile_id, params::Dict{String,<:Any}) -Lists the permissions granted to a security profile. +Lists the permissions granted to a security profile. For information about security +profiles, see Security Profiles in the Amazon Connect Administrator Guide. For a mapping of +the API name and user interface name of the security profile permissions, see List of +security profile permissions. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -7481,7 +7633,8 @@ end Provides summary information about the security profiles for the specified Amazon Connect instance. For more information about security profiles, see Security Profiles in the Amazon -Connect Administrator Guide. +Connect Administrator Guide. For a mapping of the API name and user interface name of the +security profile permissions, see List of security profile permissions. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -8318,6 +8471,51 @@ function resume_contact_recording( ) end +""" + search_agent_statuses(instance_id) + search_agent_statuses(instance_id, params::Dict{String,<:Any}) + +Searches AgentStatuses in an Amazon Connect instance, with optional filtering. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"SearchCriteria"`: The search criteria to be used to return agent statuses. +- `"SearchFilter"`: Filters to be applied to search results. 
+""" +function search_agent_statuses( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/search-agent-statuses", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_agent_statuses( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-agent-statuses", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_available_phone_numbers(phone_number_country_code, phone_number_type) search_available_phone_numbers(phone_number_country_code, phone_number_type, params::Dict{String,<:Any}) @@ -8579,7 +8777,10 @@ end search_predefined_attributes(instance_id) search_predefined_attributes(instance_id, params::Dict{String,<:Any}) -Predefined attributes that meet certain criteria. +Searches predefined attributes that meet certain criteria. Predefined attributes are +attributes in an Amazon Connect instance that can be used to route contacts to an agent or +pools of agents within a queue. For more information, see Create predefined attributes for +routing contacts to agents. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -8770,7 +8971,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys previous response in the next request to retrieve the next set of results. - `"ResourceTypes"`: The list of resource types to be used to search tags from. If not provided or if any empty list is provided, this API will search from all supported resource - types. + types. Supported resource types AGENT ROUTING_PROFILE STANDARD_QUEUE + SECURITY_PROFILE OPERATING_HOURS PROMPT CONTACT_FLOW FLOW_MODULE - `"SearchCriteria"`: The search criteria to be used to return tags. """ function search_resource_tags(InstanceId; aws_config::AbstractAWSConfig=global_aws_config()) @@ -8850,7 +9052,10 @@ end search_security_profiles(instance_id) search_security_profiles(instance_id, params::Dict{String,<:Any}) -Searches security profiles in an Amazon Connect instance, with optional filtering. +Searches security profiles in an Amazon Connect instance, with optional filtering. For +information about security profiles, see Security Profiles in the Amazon Connect +Administrator Guide. For a mapping of the API name and user interface name of the security +profile permissions, see List of security profile permissions. # Arguments - `instance_id`: The identifier of the Amazon Connect instance. You can find the instance @@ -8894,6 +9099,53 @@ function search_security_profiles( ) end +""" + search_user_hierarchy_groups(instance_id) + search_user_hierarchy_groups(instance_id, params::Dict{String,<:Any}) + +Searches UserHierarchyGroups in an Amazon Connect instance, with optional filtering. The +UserHierarchyGroup with \"LevelId\": \"0\" is the foundation for building levels on top of +an instance. It is not user-definable, nor is it visible in the UI. + +# Arguments +- `instance_id`: The identifier of the Amazon Connect instance. You can find the instanceId + in the ARN of the instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return per page. +- `"NextToken"`: The token for the next set of results. 
Use the value returned in the + previous response in the next request to retrieve the next set of results. +- `"SearchCriteria"`: The search criteria to be used to return UserHierarchyGroups. +- `"SearchFilter"`: Filters to be applied to search results. +""" +function search_user_hierarchy_groups( + InstanceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return connect( + "POST", + "/search-user-hierarchy-groups", + Dict{String,Any}("InstanceId" => InstanceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function search_user_hierarchy_groups( + InstanceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return connect( + "POST", + "/search-user-hierarchy-groups", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("InstanceId" => InstanceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ search_users(instance_id) search_users(instance_id, params::Dict{String,<:Any}) @@ -9054,13 +9306,13 @@ end start_attached_file_upload(file_name, file_size_in_bytes, file_use_case_type, instance_id, associated_resource_arn, params::Dict{String,<:Any}) Provides a pre-signed Amazon S3 URL in response for uploading your content. You may only -use this API to upload attachments to a Connect Case. +use this API to upload attachments to an Amazon Connect Case. # Arguments - `file_name`: A case-sensitive name of the attached file being uploaded. - `file_size_in_bytes`: The size of the attached file in bytes. - `file_use_case_type`: The use case for the file. -- `instance_id`: The unique identifier of the Connect instance. +- `instance_id`: The unique identifier of the Amazon Connect instance. - `associated_resource_arn`: The resource to which the attached file is (being) uploaded to. Cases are the only current supported resource. This value must be a valid ARN. @@ -9141,8 +9393,9 @@ throttling returns a TooManyRequests exception. The quota for concurrent activ exceeded. Active chat throttling returns a LimitExceededException. If you use the ChatDurationInMinutes parameter and receive a 400 error, your account may not support the ability to configure custom chat durations. For more information, contact Amazon Web -Services Support. For more information about chat, see Chat in the Amazon Connect -Administrator Guide. +Services Support. For more information about chat, see the following topics in the Amazon +Connect Administrator Guide: Concepts: Web and mobile messaging capabilities in Amazon +Connect Amazon Connect Chat security best practices # Arguments - `contact_flow_id`: The identifier of the flow for initiating the chat. To see the @@ -9384,7 +9637,9 @@ end Initiates real-time message streaming for a new chat contact. For more information about message streaming, see Enable real-time chat message streaming in the Amazon Connect -Administrator Guide. +Administrator Guide. For more information about chat, see the following topics in the +Amazon Connect Administrator Guide: Concepts: Web and mobile messaging capabilities in +Amazon Connect Amazon Connect Chat security best practices # Arguments - `chat_streaming_configuration`: The streaming configuration, such as the Amazon SNS @@ -9674,8 +9929,8 @@ Amazon Connect instance (specified as InstanceId). # Arguments - `contact_flow_id`: The identifier of the flow for the call. To see the ContactFlowId in - the Amazon Connect admin website, on the navigation menu go to Routing, Contact Flows. - Choose the flow. 
On the flow page, under the name of the flow, choose Show additional flow
+  information. The ContactFlowId is the last part of the ARN, shown here in bold:
+  arn:aws:connect:us-west-2:xxxxxxxxxxxx:instance/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/contact
-flow/846ec553-a005-41c0-8341-xxxxxxxxxxxx
@@ -10317,6 +10572,61 @@ function update_agent_status(
     )
 end

+"""
+    update_authentication_profile(authentication_profile_id, instance_id)
+    update_authentication_profile(authentication_profile_id, instance_id, params::Dict{String,<:Any})
+
+This API is in preview release for Amazon Connect and is subject to change. To request
+access to this API, contact Amazon Web Services Support. Updates the selected
+authentication profile.
+
+# Arguments
+- `authentication_profile_id`: A unique identifier for the authentication profile.
+- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance
+  ID in the Amazon Resource Name (ARN) of the instance.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"AllowedIps"`: A list of IP address range strings that are allowed to access the
+  instance. For more information on how to configure IP addresses, see Configure session
+  timeouts in the Amazon Connect Administrator Guide.
+- `"BlockedIps"`: A list of IP address range strings that are blocked from accessing the
+  instance. For more information on how to configure IP addresses, see Configure IP-based
+  access control in the Amazon Connect Administrator Guide.
+- `"Description"`: The description for the authentication profile.
+- `"Name"`: The name for the authentication profile.
+- `"PeriodicSessionDuration"`: The short-lived session duration configuration for users
+  logged in to Amazon Connect, in minutes. This value determines the maximum possible time
+  before an agent is authenticated. For more information, see Configure session timeouts in
+  the Amazon Connect Administrator Guide.
+"""
+function update_authentication_profile(
+    AuthenticationProfileId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return connect(
+        "POST",
+        "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function update_authentication_profile(
+    AuthenticationProfileId,
+    InstanceId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return connect(
+        "POST",
+        "/authentication-profiles/$(InstanceId)/$(AuthenticationProfileId)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     update_contact(contact_id, instance_id)
     update_contact(contact_id, instance_id, params::Dict{String,<:Any})
@@ -10730,6 +11040,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   contact's routing age. Contacts are routed to agents on a first-come, first-serve basis.
   This means that changing their amount of time in queue compared to others also changes
   their position in queue.
+- `"RoutingCriteria"`: Updates the routing criteria on the contact. These properties can be
+  used to change how a
contact is routed within the queue.
"""
function update_contact_routing_data(
    ContactId, InstanceId; aws_config::AbstractAWSConfig=global_aws_config()
@@ -11188,7 +11500,10 @@ end
     update_predefined_attribute(instance_id, name)
     update_predefined_attribute(instance_id, name, params::Dict{String,<:Any})

-Updates a predefined attribute for the specified Amazon Connect instance.
+Updates a predefined attribute for the specified Amazon Connect instance. Predefined
+attributes are attributes in an Amazon Connect instance that can be used to route contacts
+to an agent or pools of agents within a queue. For more information, see Create predefined
+attributes for routing contacts to agents.

# Arguments
- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance
@@ -11906,7 +12221,9 @@ end
     update_security_profile(instance_id, security_profile_id)
     update_security_profile(instance_id, security_profile_id, params::Dict{String,<:Any})

-Updates a security profile.
+Updates a security profile. For information about security profiles, see Security Profiles
+in the Amazon Connect Administrator Guide. For a mapping of the API name and user interface
+name of the security profile permissions, see List of security profile permissions.

# Arguments
- `instance_id`: The identifier of the Amazon Connect instance. You can find the instance
diff --git a/src/services/connect_contact_lens.jl b/src/services/connect_contact_lens.jl
index 4c4d4639c4..99702556ce 100644
--- a/src/services/connect_contact_lens.jl
+++ b/src/services/connect_contact_lens.jl
@@ -16,7 +16,7 @@ Provides a list of analysis segments for a real-time analysis session.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"MaxResults"`: The maximimum number of results to return per page.
+- `"MaxResults"`: The maximum number of results to return per page.
- `"NextToken"`: The token for the next set of results. Use the value returned in the
  previous response in the next request to retrieve the next set of results.
"""
diff --git a/src/services/controlcatalog.jl b/src/services/controlcatalog.jl
index 78a7e27db8..304112bb91 100644
--- a/src/services/controlcatalog.jl
+++ b/src/services/controlcatalog.jl
@@ -4,6 +4,52 @@ using AWS.AWSServices: controlcatalog
 using AWS.Compat
 using AWS.UUIDs

+"""
+    get_control(control_arn)
+    get_control(control_arn, params::Dict{String,<:Any})
+
+Returns details about a specific control, most notably a list of Amazon Web Services
+Regions where this control is supported. Input a value for the ControlArn parameter in ARN
+form. GetControl accepts controltower or controlcatalog control ARNs as input and returns
+the ARN in controlcatalog format. In the API response, controls that have the value GLOBAL
+in the Scope field do not show the DeployableRegions field, because it does not apply.
+Controls that have the value REGIONAL in the Scope field return a value for the
+DeployableRegions field, as shown in the example.
+
+# Arguments
+- `control_arn`: The Amazon Resource Name (ARN) of the control.
It has one of the following + formats: Global format + arn:{PARTITION}:controlcatalog:::control/{CONTROL_CATALOG_OPAQUE_ID} Or Regional format + arn:{PARTITION}:controltower:{REGION}::control/{CONTROL_TOWER_OPAQUE_ID} Here is a more + general pattern that covers Amazon Web Services Control Tower and Control Catalog ARNs: + ^arn:(aws(?:[-a-z]*)?):(controlcatalog|controltower):[a-zA-Z0-9-]*::control/[0-9a-zA-Z_-]+ + +""" +function get_control(ControlArn; aws_config::AbstractAWSConfig=global_aws_config()) + return controlcatalog( + "POST", + "/get-control", + Dict{String,Any}("ControlArn" => ControlArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_control( + ControlArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return controlcatalog( + "POST", + "/get-control", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ControlArn" => ControlArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_common_controls() list_common_controls(params::Dict{String,<:Any}) @@ -37,6 +83,37 @@ function list_common_controls( ) end +""" + list_controls() + list_controls(params::Dict{String,<:Any}) + +Returns a paginated list of all available controls in the Amazon Web Services Control +Catalog library. Allows you to discover available controls. The list of controls is given +as structures of type controlSummary. The ARN is returned in the global controlcatalog +format, as shown in the examples. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results on a page or for an API request call. +- `"nextToken"`: The pagination token that's used to fetch the next set of results. +""" +function list_controls(; aws_config::AbstractAWSConfig=global_aws_config()) + return controlcatalog( + "POST", "/list-controls"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_controls( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controlcatalog( + "POST", + "/list-controls", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_domains() list_domains(params::Dict{String,<:Any}) diff --git a/src/services/controltower.jl b/src/services/controltower.jl index d5f92b0b2e..c7e452f056 100644 --- a/src/services/controltower.jl +++ b/src/services/controltower.jl @@ -144,10 +144,10 @@ end disable_control(control_identifier, target_identifier) disable_control(control_identifier, target_identifier, params::Dict{String,<:Any}) -This API call turns off a control. It starts an asynchronous operation that deletes AWS -resources on the specified organizational unit and the accounts it contains. The resources -will vary according to the control that you specify. For usage examples, see the Amazon -Web Services Control Tower User Guide . +This API call turns off a control. It starts an asynchronous operation that deletes Amazon +Web Services resources on the specified organizational unit and the accounts it contains. +The resources will vary according to the control that you specify. For usage examples, see +the Controls Reference Guide . # Arguments - `control_identifier`: The ARN of the control. Only Strongly recommended and Elective @@ -265,7 +265,7 @@ end This API call activates a control. 
It starts an asynchronous operation that creates Amazon Web Services resources on the specified organizational unit and the accounts it contains. The resources created will vary according to the control that you specify. For usage -examples, see the Amazon Web Services Control Tower User Guide . +examples, see the Controls Reference Guide . # Arguments - `control_identifier`: The ARN of the control. Only Strongly recommended and Elective @@ -406,7 +406,7 @@ end Returns the status of a particular EnableControl or DisableControl operation. Displays a message in case of error. Details for an operation are available for 90 days. For usage -examples, see the Amazon Web Services Control Tower User Guide . +examples, see the Controls Reference Guide . # Arguments - `operation_identifier`: The ID of the asynchronous operation, which is used to track @@ -490,8 +490,8 @@ end get_enabled_control(enabled_control_identifier) get_enabled_control(enabled_control_identifier, params::Dict{String,<:Any}) -Retrieves details about an enabled control. For usage examples, see the Amazon Web -Services Control Tower User Guide . +Retrieves details about an enabled control. For usage examples, see the Controls Reference +Guide . # Arguments - `enabled_control_identifier`: The controlIdentifier of the enabled control. @@ -644,7 +644,8 @@ end list_control_operations() list_control_operations(params::Dict{String,<:Any}) -Provides a list of operations in progress or queued. +Provides a list of operations in progress or queued. For usage examples, see +ListControlOperation examples. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -713,12 +714,12 @@ end list_enabled_controls(params::Dict{String,<:Any}) Lists the controls enabled by Amazon Web Services Control Tower on the specified -organizational unit and the accounts it contains. For usage examples, see the Amazon Web -Services Control Tower User Guide . +organizational unit and the accounts it contains. For usage examples, see the Controls +Reference Guide . # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"filter"`: An input filter for the ListCEnabledControls API that lets you select the +- `"filter"`: An input filter for the ListEnabledControls API that lets you select the types of control operations to view. - `"maxResults"`: How many results to return per API call. - `"nextToken"`: The token to continue the list from a previous API call with the same @@ -746,6 +747,41 @@ function list_enabled_controls( ) end +""" + list_landing_zone_operations() + list_landing_zone_operations(params::Dict{String,<:Any}) + +Lists all landing zone operations from the past 90 days. Results are sorted by time, with +the most recent operation first. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"filter"`: An input filter for the ListLandingZoneOperations API that lets you select + the types of landing zone operations to view. +- `"maxResults"`: How many results to return per API call. +- `"nextToken"`: The token to continue the list from a previous API call with the same + parameters. 
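+
+# Example
+A minimal, illustrative sketch using AWS.jl's high-level `@service` interface:
+
+    using AWS
+    @service ControlTower
+
+    # List the ten most recent landing zone operations.
+    ControlTower.list_landing_zone_operations(Dict("maxResults" => 10))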
+""" +function list_landing_zone_operations(; aws_config::AbstractAWSConfig=global_aws_config()) + return controltower( + "POST", + "/list-landingzone-operations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_landing_zone_operations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return controltower( + "POST", + "/list-landingzone-operations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_landing_zones() list_landing_zones(params::Dict{String,<:Any}) @@ -781,8 +817,8 @@ end list_tags_for_resource(resource_arn) list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) -Returns a list of tags associated with the resource. For usage examples, see the Amazon -Web Services Control Tower User Guide . +Returns a list of tags associated with the resource. For usage examples, see the Controls +Reference Guide . # Arguments - `resource_arn`: The ARN of the resource. @@ -861,7 +897,10 @@ end reset_landing_zone(landing_zone_identifier, params::Dict{String,<:Any}) This API call resets a landing zone. It starts an asynchronous operation that resets the -landing zone to the parameters specified in its original configuration. +landing zone to the parameters specified in the original configuration, which you specified +in the manifest file. Nothing in the manifest file's original landing zone configuration is +changed during the reset process, by default. This API is not the same as a rollback of a +landing zone version, which is not a supported operation. # Arguments - `landing_zone_identifier`: The unique identifier of the landing zone. @@ -902,8 +941,7 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Applies tags to a resource. For usage examples, see the Amazon Web Services Control Tower -User Guide . +Applies tags to a resource. For usage examples, see the Controls Reference Guide . # Arguments - `resource_arn`: The ARN of the resource to be tagged. @@ -938,8 +976,7 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes tags from a resource. For usage examples, see the Amazon Web Services Control -Tower User Guide . +Removes tags from a resource. For usage examples, see the Controls Reference Guide . # Arguments - `resource_arn`: The ARN of the resource. @@ -1036,11 +1073,11 @@ end EnablementStatus of SUCCEEDED, supply parameters that are different from the currently configured parameters. Otherwise, Amazon Web Services Control Tower will not accept the request. If the enabled control shows an EnablementStatus of FAILED, Amazon Web Services -Control Tower will update the control to match any valid parameters that you supply. If the +Control Tower updates the control to match any valid parameters that you supply. If the DriftSummary status for the control shows as DRIFTED, you cannot call this API. Instead, you can update the control by calling DisableControl and again calling EnableControl, or -you can run an extending governance operation. For usage examples, see the Amazon Web -Services Control Tower User Guide +you can run an extending governance operation. For usage examples, see the Controls +Reference Guide . # Arguments - `enabled_control_identifier`: The ARN of the enabled control that will be updated. @@ -1095,8 +1132,10 @@ specified in the updated manifest file. # Arguments - `landing_zone_identifier`: The unique identifier of the landing zone. 
-- `manifest`: The manifest JSON file is a text file that describes your Amazon Web Services - resources. For examples, review Launch your landing zone. +- `manifest`: The manifest file (JSON) is a text file that describes your Amazon Web + Services resources. For an example, review Launch your landing zone. The example manifest + file contains each of the available parameters. The schema for the landing zone's JSON + manifest file is not published, by design. - `version`: The landing zone version, for example, 3.2. """ diff --git a/src/services/cost_optimization_hub.jl b/src/services/cost_optimization_hub.jl index ea3d0ddd6a..10453701f1 100644 --- a/src/services/cost_optimization_hub.jl +++ b/src/services/cost_optimization_hub.jl @@ -112,7 +112,9 @@ for this API: recommendationIds, resourceArns, and resourceIds. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"filter"`: -- `"maxResults"`: The maximum number of recommendations that are returned for the request. +- `"maxResults"`: The maximum number of recommendations to be returned for the request. +- `"metrics"`: Additional metrics to be returned for the request. The only valid value is + savingsPercentage. - `"nextToken"`: The token to retrieve the next set of results. """ function list_recommendation_summaries( @@ -172,11 +174,11 @@ end update_enrollment_status(status, params::Dict{String,<:Any}) Updates the enrollment (opt in and opt out) status of an account to the Cost Optimization -Hub service. If the account is a management account of an organization, this action can -also be used to enroll member accounts of the organization. You must have the appropriate -permissions to opt in to Cost Optimization Hub and to view its recommendations. When you -opt in, Cost Optimization Hub automatically creates a service-linked role in your account -to access its data. +Hub service. If the account is a management account or delegated administrator of an +organization, this action can also be used to enroll member accounts of the organization. +You must have the appropriate permissions to opt in to Cost Optimization Hub and to view +its recommendations. When you opt in, Cost Optimization Hub automatically creates a +service-linked role in your account to access its data. # Arguments - `status`: Sets the account status. @@ -184,7 +186,7 @@ to access its data. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"includeMemberAccounts"`: Indicates whether to enroll member accounts of the - organization if the account is the management account. + organization if the account is the management account or delegated administrator. """ function update_enrollment_status(status; aws_config::AbstractAWSConfig=global_aws_config()) return cost_optimization_hub( diff --git a/src/services/datazone.jl b/src/services/datazone.jl index 522ae7d8ab..e41179bebe 100644 --- a/src/services/datazone.jl +++ b/src/services/datazone.jl @@ -65,6 +65,7 @@ Accepts a subscription request to a specific asset. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"assetScopes"`: The asset scopes of the accept subscription request. - `"decisionComment"`: A description that specifies the reason for accepting the specified subscription request. 
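+
+# Example
+A minimal, illustrative sketch; the domain and subscription request IDs are hypothetical,
+and the positional arguments follow the generated signature at the top of this docstring:
+
+    using AWS
+    @service DataZone
+
+    # Approve a pending subscription request with a decision comment.
+    DataZone.accept_subscription_request(
+        "dzd_1234abcd",
+        "subscription-request-id",
+        Dict("decisionComment" => "Approved for the analytics team"),
+    )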
""" @@ -93,6 +94,134 @@ function accept_subscription_request( ) end +""" + add_entity_owner(domain_identifier, entity_identifier, entity_type, owner) + add_entity_owner(domain_identifier, entity_identifier, entity_type, owner, params::Dict{String,<:Any}) + +Adds the owner of an entity (a domain unit). + +# Arguments +- `domain_identifier`: The ID of the domain in which you want to add the entity owner. +- `entity_identifier`: The ID of the entity to which you want to add an owner. +- `entity_type`: The type of an entity. +- `owner`: The owner that you want to add to the entity. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +""" +function add_entity_owner( + domainIdentifier, + entityIdentifier, + entityType, + owner; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/addOwner", + Dict{String,Any}("owner" => owner, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function add_entity_owner( + domainIdentifier, + entityIdentifier, + entityType, + owner, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/addOwner", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("owner" => owner, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + add_policy_grant(detail, domain_identifier, entity_identifier, entity_type, policy_type, principal) + add_policy_grant(detail, domain_identifier, entity_identifier, entity_type, policy_type, principal, params::Dict{String,<:Any}) + +Adds a policy grant (an authorization policy) to a specified entity, including domain +units, environment blueprint configurations, or environment profiles. + +# Arguments +- `detail`: The details of the policy grant. +- `domain_identifier`: The ID of the domain where you want to add a policy grant. +- `entity_identifier`: The ID of the entity (resource) to which you want to add a policy + grant. +- `entity_type`: The type of entity (resource) to which the grant is added. +- `policy_type`: The type of policy that you want to grant. +- `principal`: The principal to whom the permissions are granted. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. 
+""" +function add_policy_grant( + detail, + domainIdentifier, + entityIdentifier, + entityType, + policyType, + principal; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/policies/managed/$(entityType)/$(entityIdentifier)/addGrant", + Dict{String,Any}( + "detail" => detail, + "policyType" => policyType, + "principal" => principal, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function add_policy_grant( + detail, + domainIdentifier, + entityIdentifier, + entityType, + policyType, + principal, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/policies/managed/$(entityType)/$(entityIdentifier)/addGrant", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "detail" => detail, + "policyType" => policyType, + "principal" => principal, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ associate_environment_role(domain_identifier, environment_identifier, environment_role_arn) associate_environment_role(domain_identifier, environment_identifier, environment_role_arn, params::Dict{String,<:Any}) @@ -281,6 +410,70 @@ function create_asset( ) end +""" + create_asset_filter(asset_identifier, configuration, domain_identifier, name) + create_asset_filter(asset_identifier, configuration, domain_identifier, name, params::Dict{String,<:Any}) + +Creates a data asset filter. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `configuration`: The configuration of the asset filter. +- `domain_identifier`: The ID of the domain in which you want to create an asset filter. +- `name`: The name of the asset filter. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"description"`: The description of the asset filter. +""" +function create_asset_filter( + assetIdentifier, + configuration, + domainIdentifier, + name; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters", + Dict{String,Any}( + "configuration" => configuration, + "name" => name, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_asset_filter( + assetIdentifier, + configuration, + domainIdentifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "configuration" => configuration, + "name" => name, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_asset_revision(domain_identifier, identifier, name) create_asset_revision(domain_identifier, identifier, name, params::Dict{String,<:Any}) @@ -401,6 +594,123 @@ function create_asset_type( ) end +""" + create_data_product(domain_identifier, name, owning_project_identifier) + create_data_product(domain_identifier, name, owning_project_identifier, params::Dict{String,<:Any}) + +Creates a data product. 
+ +# Arguments +- `domain_identifier`: The ID of the domain where the data product is created. +- `name`: The name of the data product. +- `owning_project_identifier`: The ID of the owning project of the data product. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"description"`: The description of the data product. +- `"formsInput"`: The metadata forms of the data product. +- `"glossaryTerms"`: The glossary terms of the data product. +- `"items"`: The data assets of the data product. +""" +function create_data_product( + domainIdentifier, + name, + owningProjectIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/data-products", + Dict{String,Any}( + "name" => name, + "owningProjectIdentifier" => owningProjectIdentifier, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_data_product( + domainIdentifier, + name, + owningProjectIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/data-products", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, + "owningProjectIdentifier" => owningProjectIdentifier, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_data_product_revision(domain_identifier, identifier, name) + create_data_product_revision(domain_identifier, identifier, name, params::Dict{String,<:Any}) + +Creates a data product revision. + +# Arguments +- `domain_identifier`: The ID of the domain where the data product revision is created. +- `identifier`: The ID of the data product revision. +- `name`: The name of the data product revision. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"description"`: The description of the data product revision. +- `"formsInput"`: The metadata forms of the data product revision. +- `"glossaryTerms"`: The glossary terms of the data product revision. +- `"items"`: The data assets of the data product revision. 
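+
+# Example
+A minimal, illustrative sketch; the domain and data product IDs are hypothetical:
+
+    using AWS
+    @service DataZone
+
+    # Cut a new revision of an existing data product with an updated description.
+    DataZone.create_data_product_revision(
+        "dzd_1234abcd",
+        "existing-data-product-id",
+        "Sales data product",
+        Dict("description" => "Adds the latest quarter's assets"),
+    )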
+""" +function create_data_product_revision( + domainIdentifier, identifier, name; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/data-products/$(identifier)/revisions", + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_data_product_revision( + domainIdentifier, + identifier, + name, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/data-products/$(identifier)/revisions", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("name" => name, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_data_source(domain_identifier, environment_identifier, name, project_identifier, type) create_data_source(domain_identifier, environment_identifier, name, project_identifier, type, params::Dict{String,<:Any}) @@ -547,6 +857,67 @@ function create_domain( ) end +""" + create_domain_unit(domain_identifier, name, parent_domain_unit_identifier) + create_domain_unit(domain_identifier, name, parent_domain_unit_identifier, params::Dict{String,<:Any}) + +Creates a domain unit in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to crate a domain unit. +- `name`: The name of the domain unit. +- `parent_domain_unit_identifier`: The ID of the parent domain unit. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +- `"description"`: The description of the domain unit. +""" +function create_domain_unit( + domainIdentifier, + name, + parentDomainUnitIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/domain-units", + Dict{String,Any}( + "name" => name, + "parentDomainUnitIdentifier" => parentDomainUnitIdentifier, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_domain_unit( + domainIdentifier, + name, + parentDomainUnitIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/domain-units", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "name" => name, + "parentDomainUnitIdentifier" => parentDomainUnitIdentifier, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_environment(domain_identifier, environment_profile_identifier, name, project_identifier) create_environment(domain_identifier, environment_profile_identifier, name, project_identifier, params::Dict{String,<:Any}) @@ -1073,6 +1444,8 @@ Creates an Amazon DataZone project. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"description"`: The description of the Amazon DataZone project. +- `"domainUnitId"`: The ID of the domain unit. This parameter is not required and if it is + not specified, then the project is created at the root domain unit level. - `"glossaryTerms"`: The glossary terms that can be used in this Amazon DataZone project. 
""" function create_project( @@ -1434,7 +1807,7 @@ end delete_asset(domain_identifier, identifier) delete_asset(domain_identifier, identifier, params::Dict{String,<:Any}) -Delets an asset in Amazon DataZone. +Deletes an asset in Amazon DataZone. # Arguments - `domain_identifier`: The ID of the Amazon DataZone domain in which the asset is deleted. @@ -1466,6 +1839,47 @@ function delete_asset( ) end +""" + delete_asset_filter(asset_identifier, domain_identifier, identifier) + delete_asset_filter(asset_identifier, domain_identifier, identifier, params::Dict{String,<:Any}) + +Deletes an asset filter. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `domain_identifier`: The ID of the domain where you want to delete an asset filter. +- `identifier`: The ID of the asset filter that you want to delete. + +""" +function delete_asset_filter( + assetIdentifier, + domainIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_asset_filter( + assetIdentifier, + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_asset_type(domain_identifier, identifier) delete_asset_type(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -1503,6 +1917,43 @@ function delete_asset_type( ) end +""" + delete_data_product(domain_identifier, identifier) + delete_data_product(domain_identifier, identifier, params::Dict{String,<:Any}) + +Deletes a data product in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which the data product is + deleted. +- `identifier`: The identifier of the data product that is deleted. + +""" +function delete_data_product( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/data-products/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_data_product( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/data-products/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_data_source(domain_identifier, identifier) delete_data_source(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -1590,6 +2041,42 @@ function delete_domain( ) end +""" + delete_domain_unit(domain_identifier, identifier) + delete_domain_unit(domain_identifier, identifier, params::Dict{String,<:Any}) + +Deletes a domain unit. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to delete a domain unit. +- `identifier`: The ID of the domain unit that you want to delete. 
+ +""" +function delete_domain_unit( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/domain-units/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_domain_unit( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/domain-units/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_environment(domain_identifier, identifier) delete_environment(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -2172,22 +2659,102 @@ function disassociate_environment_role( aws_config::AbstractAWSConfig=global_aws_config(), ) return datazone( - "DELETE", - "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/roles/$(environmentRoleArn)"; + "DELETE", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/roles/$(environmentRoleArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_environment_role( + domainIdentifier, + environmentIdentifier, + environmentRoleArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "DELETE", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/roles/$(environmentRoleArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_asset(domain_identifier, identifier) + get_asset(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets an Amazon DataZone asset. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain to which the asset belongs. +- `identifier`: The ID of the Amazon DataZone asset. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"revision"`: The revision of the Amazon DataZone asset. +""" +function get_asset( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_asset( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_asset_filter(asset_identifier, domain_identifier, identifier) + get_asset_filter(asset_identifier, domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets an asset filter. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `domain_identifier`: The ID of the domain where you want to get an asset filter. +- `identifier`: The ID of the asset filter. 
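+
+# Example
+A minimal sketch with hypothetical identifiers, following the argument order of the
+methods below:
+```julia
+filter = get_asset_filter("asset_example", "dzd_exampledomain", "flt_example")
+```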
+ +""" +function get_asset_filter( + assetIdentifier, + domainIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function disassociate_environment_role( +function get_asset_filter( + assetIdentifier, domainIdentifier, - environmentIdentifier, - environmentRoleArn, + identifier, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), ) return datazone( - "DELETE", - "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/roles/$(environmentRoleArn)", + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2195,30 +2762,30 @@ function disassociate_environment_role( end """ - get_asset(domain_identifier, identifier) - get_asset(domain_identifier, identifier, params::Dict{String,<:Any}) + get_asset_type(domain_identifier, identifier) + get_asset_type(domain_identifier, identifier, params::Dict{String,<:Any}) -Gets an Amazon DataZone asset. +Gets an Amazon DataZone asset type. # Arguments -- `domain_identifier`: The ID of the Amazon DataZone domain to which the asset belongs. -- `identifier`: The ID of the Amazon DataZone asset. +- `domain_identifier`: The ID of the Amazon DataZone domain in which the asset type exists. +- `identifier`: The ID of the asset type. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"revision"`: The revision of the Amazon DataZone asset. +- `"revision"`: The revision of the asset type. """ -function get_asset( +function get_asset_type( domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() ) return datazone( "GET", - "/v2/domains/$(domainIdentifier)/assets/$(identifier)"; + "/v2/domains/$(domainIdentifier)/asset-types/$(identifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_asset( +function get_asset_type( domainIdentifier, identifier, params::AbstractDict{String}; @@ -2226,7 +2793,7 @@ function get_asset( ) return datazone( "GET", - "/v2/domains/$(domainIdentifier)/assets/$(identifier)", + "/v2/domains/$(domainIdentifier)/asset-types/$(identifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2234,30 +2801,30 @@ function get_asset( end """ - get_asset_type(domain_identifier, identifier) - get_asset_type(domain_identifier, identifier, params::Dict{String,<:Any}) + get_data_product(domain_identifier, identifier) + get_data_product(domain_identifier, identifier, params::Dict{String,<:Any}) -Gets an Amazon DataZone asset type. +Gets the data product. # Arguments -- `domain_identifier`: The ID of the Amazon DataZone domain in which the asset type exists. -- `identifier`: The ID of the asset type. +- `domain_identifier`: The ID of the domain where the data product lives. +- `identifier`: The ID of the data product. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"revision"`: The revision of the asset type. +- `"revision"`: The revision of the data product. 
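+
+# Example
+An illustrative call with hypothetical identifiers; the optional `"revision"` key is
+passed through the `params` dictionary:
+```julia
+get_data_product("dzd_exampledomain", "dp_exampleproduct", Dict("revision" => "2"))
+```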
""" -function get_asset_type( +function get_data_product( domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() ) return datazone( "GET", - "/v2/domains/$(domainIdentifier)/asset-types/$(identifier)"; + "/v2/domains/$(domainIdentifier)/data-products/$(identifier)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end -function get_asset_type( +function get_data_product( domainIdentifier, identifier, params::AbstractDict{String}; @@ -2265,7 +2832,7 @@ function get_asset_type( ) return datazone( "GET", - "/v2/domains/$(domainIdentifier)/asset-types/$(identifier)", + "/v2/domains/$(domainIdentifier)/data-products/$(identifier)", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -2376,6 +2943,42 @@ function get_domain( ) end +""" + get_domain_unit(domain_identifier, identifier) + get_domain_unit(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets the details of the specified domain unit. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to get a domain unit. +- `identifier`: The identifier of the domain unit that you want to get. + +""" +function get_domain_unit( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/domain-units/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_domain_unit( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/domain-units/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_environment(domain_identifier, identifier) get_environment(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -2528,6 +3131,45 @@ function get_environment_blueprint_configuration( ) end +""" + get_environment_credentials(domain_identifier, environment_identifier) + get_environment_credentials(domain_identifier, environment_identifier, params::Dict{String,<:Any}) + +Gets the credentials of an environment in Amazon DataZone. + +# Arguments +- `domain_identifier`: The ID of the Amazon DataZone domain in which this environment and + its credentials exist. +- `environment_identifier`: The ID of the environment whose credentials this operation gets. + +""" +function get_environment_credentials( + domainIdentifier, + environmentIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/credentials"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_environment_credentials( + domainIdentifier, + environmentIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/environments/$(environmentIdentifier)/credentials", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_environment_profile(domain_identifier, identifier) get_environment_profile(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -2751,11 +3393,54 @@ function get_iam_portal_login_url( ) end +""" + get_lineage_node(domain_identifier, identifier) + get_lineage_node(domain_identifier, identifier, params::Dict{String,<:Any}) + +Gets the data lineage node. 
+
+# Arguments
+- `domain_identifier`: The ID of the domain in which you want to get the data lineage node.
+- `identifier`: The ID of the data lineage node that you want to get. Both a lineage node
+  identifier generated by Amazon DataZone and a sourceIdentifier of the lineage node are
+  supported. If sourceIdentifier is greater than 1800 characters, you can use the lineage
+  node identifier generated by Amazon DataZone to get the node details.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"timestamp"`: The event time stamp for which you want to get the data lineage node.
+"""
+function get_lineage_node(
+    domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_lineage_node(
+    domainIdentifier,
+    identifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return datazone(
+        "GET",
+        "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     get_listing(domain_identifier, identifier)
     get_listing(domain_identifier, identifier, params::Dict{String,<:Any})
 
-Gets a listing (a record of an asset at a given time).
+Gets a listing (a record of an asset at a given time). If you specify a listing version,
+only details that are specific to that version are returned.
 
 # Arguments
 - `domain_identifier`: The ID of the Amazon DataZone domain.
@@ -3109,6 +3794,54 @@ function get_user_profile(
     )
 end
 
+"""
+    list_asset_filters(asset_identifier, domain_identifier)
+    list_asset_filters(asset_identifier, domain_identifier, params::Dict{String,<:Any})
+
+Lists asset filters.
+
+# Arguments
+- `asset_identifier`: The ID of the data asset.
+- `domain_identifier`: The ID of the domain where you want to list asset filters.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"maxResults"`: The maximum number of asset filters to return in a single call to
+  ListAssetFilters. When the number of asset filters to be listed is greater than the value
+  of MaxResults, the response contains a NextToken value that you can use in a subsequent
+  call to ListAssetFilters to list the next set of asset filters.
+- `"nextToken"`: When the number of asset filters is greater than the default value for the
+  MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than
+  the number of asset filters, the response includes a pagination token named NextToken. You
+  can specify this NextToken value in a subsequent call to ListAssetFilters to list the next
+  set of asset filters.
+- `"status"`: The status of the asset filter.
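+
+# Example
+A pagination sketch with hypothetical identifiers. It assumes the wrapper returns the
+parsed response as a `Dict`-like value whose item list is keyed `"items"` and whose
+pagination token is keyed `"nextToken"`:
+```julia
+params = Dict{String,Any}("maxResults" => 25)
+while true
+    page = list_asset_filters("asset_example", "dzd_exampledomain", params)
+    foreach(println, get(page, "items", []))   # assumed response shape
+    token = get(page, "nextToken", nothing)
+    token === nothing && break
+    params["nextToken"] = token                # fetch the next page
+end
+```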
+""" +function list_asset_filters( + assetIdentifier, domainIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_asset_filters( + assetIdentifier, + domainIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_asset_revisions(domain_identifier, identifier) list_asset_revisions(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -3156,6 +3889,54 @@ function list_asset_revisions( ) end +""" + list_data_product_revisions(domain_identifier, identifier) + list_data_product_revisions(domain_identifier, identifier, params::Dict{String,<:Any}) + +Lists data product revisions. + +# Arguments +- `domain_identifier`: The ID of the domain of the data product revisions that you want to + list. +- `identifier`: The ID of the data product revision. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of asset filters to return in a single call to + ListDataProductRevisions. When the number of data product revisions to be listed is greater + than the value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListDataProductRevisions to list the next set of data product revisions. +- `"nextToken"`: When the number of data product revisions is greater than the default + value for the MaxResults parameter, or if you explicitly specify a value for MaxResults + that is less than the number of data product revisions, the response includes a pagination + token named NextToken. You can specify this NextToken value in a subsequent call to + ListDataProductRevisions to list the next set of data product revisions. +""" +function list_data_product_revisions( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-products/$(identifier)/revisions"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_data_product_revisions( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/data-products/$(identifier)/revisions", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_data_source_run_activities(domain_identifier, identifier) list_data_source_run_activities(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -3314,6 +4095,65 @@ function list_data_sources( ) end +""" + list_domain_units_for_parent(domain_identifier, parent_domain_unit_identifier) + list_domain_units_for_parent(domain_identifier, parent_domain_unit_identifier, params::Dict{String,<:Any}) + +Lists child domain units for the specified parent domain unit. + +# Arguments +- `domain_identifier`: The ID of the domain in which you want to list domain units for a + parent domain unit. +- `parent_domain_unit_identifier`: The ID of the parent domain unit. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"maxResults"`: The maximum number of domain units to return in a single call to + ListDomainUnitsForParent. When the number of domain units to be listed is greater than the + value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListDomainUnitsForParent to list the next set of domain units. +- `"nextToken"`: When the number of domain units is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of domain units, the response includes a pagination token named NextToken. You + can specify this NextToken value in a subsequent call to ListDomainUnitsForParent to list + the next set of domain units. +""" +function list_domain_units_for_parent( + domainIdentifier, + parentDomainUnitIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/domain-units", + Dict{String,Any}("parentDomainUnitIdentifier" => parentDomainUnitIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_domain_units_for_parent( + domainIdentifier, + parentDomainUnitIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/domain-units", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "parentDomainUnitIdentifier" => parentDomainUnitIdentifier + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_domains() list_domains(params::Dict{String,<:Any}) @@ -3342,7 +4182,59 @@ function list_domains( params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return datazone( - "GET", "/v2/domains", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + "GET", "/v2/domains", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_entity_owners(domain_identifier, entity_identifier, entity_type) + list_entity_owners(domain_identifier, entity_identifier, entity_type, params::Dict{String,<:Any}) + +Lists the entity (domain units) owners. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to list entity owners. +- `entity_identifier`: The ID of the entity that you want to list. +- `entity_type`: The type of the entity that you want to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of entities to return in a single call to + ListEntityOwners. When the number of entities to be listed is greater than the value of + MaxResults, the response contains a NextToken value that you can use in a subsequent call + to ListEntityOwners to list the next set of entities. +- `"nextToken"`: When the number of entities is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of entities, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to ListEntityOwners to list the next set + of entities. 
+""" +function list_entity_owners( + domainIdentifier, + entityIdentifier, + entityType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/owners"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_entity_owners( + domainIdentifier, + entityIdentifier, + entityType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/owners", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, ) end @@ -3602,6 +4494,62 @@ function list_environments( ) end +""" + list_lineage_node_history(domain_identifier, identifier) + list_lineage_node_history(domain_identifier, identifier, params::Dict{String,<:Any}) + +Lists the history of the specified data lineage node. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to list the history of the + specified data lineage node. +- `identifier`: The ID of the data lineage node whose history you want to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"direction"`: The direction of the data lineage node refers to the lineage node having + neighbors in that direction. For example, if direction is UPSTREAM, the + ListLineageNodeHistory API responds with historical versions with upstream neighbors only. +- `"maxResults"`: The maximum number of history items to return in a single call to + ListLineageNodeHistory. When the number of memberships to be listed is greater than the + value of MaxResults, the response contains a NextToken value that you can use in a + subsequent call to ListLineageNodeHistory to list the next set of items. +- `"nextToken"`: When the number of history items is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of items, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to ListLineageNodeHistory to list the + next set of items. +- `"sortOrder"`: The order by which you want data lineage node history to be sorted. +- `"timestampGTE"`: Specifies whether the action is to return data lineage node history + from the time after the event timestamp. +- `"timestampLTE"`: Specifies whether the action is to return data lineage node history + from the time prior of the event timestamp. 
+""" +function list_lineage_node_history( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)/history"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_lineage_node_history( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/lineage/nodes/$(identifier)/history", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_metadata_generation_runs(domain_identifier) list_metadata_generation_runs(domain_identifier, params::Dict{String,<:Any}) @@ -3702,6 +4650,64 @@ function list_notifications( ) end +""" + list_policy_grants(domain_identifier, entity_identifier, entity_type, policy_type) + list_policy_grants(domain_identifier, entity_identifier, entity_type, policy_type, params::Dict{String,<:Any}) + +Lists policy grants. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to list policy grants. +- `entity_identifier`: The ID of the entity for which you want to list policy grants. +- `entity_type`: The type of entity for which you want to list policy grants. +- `policy_type`: The type of policy that you want to list. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of grants to return in a single call to + ListPolicyGrants. When the number of grants to be listed is greater than the value of + MaxResults, the response contains a NextToken value that you can use in a subsequent call + to ListPolicyGrants to list the next set of grants. +- `"nextToken"`: When the number of grants is greater than the default value for the + MaxResults parameter, or if you explicitly specify a value for MaxResults that is less than + the number of grants, the response includes a pagination token named NextToken. You can + specify this NextToken value in a subsequent call to ListPolicyGrants to list the next set + of grants. +""" +function list_policy_grants( + domainIdentifier, + entityIdentifier, + entityType, + policyType; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/policies/managed/$(entityType)/$(entityIdentifier)/grants", + Dict{String,Any}("policyType" => policyType); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_policy_grants( + domainIdentifier, + entityIdentifier, + entityType, + policyType, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "GET", + "/v2/domains/$(domainIdentifier)/policies/managed/$(entityType)/$(entityIdentifier)/grants", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("policyType" => policyType), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_project_memberships(domain_identifier, project_identifier) list_project_memberships(domain_identifier, project_identifier, params::Dict{String,<:Any}) @@ -3819,6 +4825,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys less than the number of subscription grants, the response includes a pagination token named NextToken. You can specify this NextToken value in a subsequent call to ListSubscriptionGrants to list the next set of subscription grants. 
+- `"owningProjectId"`: The ID of the owning project of the subscription grants. - `"sortBy"`: Specifies the way of sorting the results of this action. - `"sortOrder"`: Specifies the sort order of this action. - `"subscribedListingId"`: The identifier of the subscribed listing. @@ -3873,7 +4880,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"owningProjectId"`: The identifier of the project for the subscription requests. - `"sortBy"`: Specifies the way to sort the results of this action. - `"sortOrder"`: Specifies the sort order for the results of this action. -- `"status"`: Specifies the status of the subscription requests. +- `"status"`: Specifies the status of the subscription requests. This is not a required + parameter, but if not specified, by default, Amazon DataZone returns only PENDING + subscription requests. - `"subscribedListingId"`: The identifier of the subscribed listing. """ function list_subscription_requests( @@ -3977,7 +4986,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"owningProjectId"`: The identifier of the owning project. - `"sortBy"`: Specifies the way in which the results of this action are to be sorted. - `"sortOrder"`: Specifies the sort order for the results of this action. -- `"status"`: The status of the subscriptions that you want to list. +- `"status"`: The status of the subscriptions that you want to list. This is not a + required parameter, but if not provided, by default, Amazon DataZone returns only APPROVED + subscriptions. - `"subscribedListingId"`: The identifier of the subscribed listing for the subscriptions that you want to list. - `"subscriptionRequestIdentifier"`: The identifier of the subscription request for the @@ -4102,6 +5113,54 @@ function list_time_series_data_points( ) end +""" + post_lineage_event(domain_identifier, event) + post_lineage_event(domain_identifier, event, params::Dict{String,<:Any}) + +Posts a data lineage event. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to post a data lineage event. +- `event`: The data lineage event that you want to post. Only open-lineage run event are + supported as events. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. 
+""" +function post_lineage_event( + domainIdentifier, event; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/lineage/events", + Dict{String,Any}("event" => event, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function post_lineage_event( + domainIdentifier, + event, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/lineage/events", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("event" => event, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ post_time_series_data_points(domain_identifier, entity_identifier, entity_type, forms) post_time_series_data_points(domain_identifier, entity_identifier, entity_type, forms, params::Dict{String,<:Any}) @@ -4173,6 +5232,7 @@ Writes the configuration for the specified environment blueprint in Amazon DataZ # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"manageAccessRoleArn"`: The ARN of the manage access role. +- `"provisioningConfigurations"`: The provisioning configuration of a blueprint. - `"provisioningRoleArn"`: The ARN of the provisioning role. - `"regionalParameters"`: The regional parameters in the environment blueprint. """ @@ -4296,6 +5356,128 @@ function reject_subscription_request( ) end +""" + remove_entity_owner(domain_identifier, entity_identifier, entity_type, owner) + remove_entity_owner(domain_identifier, entity_identifier, entity_type, owner, params::Dict{String,<:Any}) + +Removes an owner from an entity. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to remove an owner from an + entity. +- `entity_identifier`: The ID of the entity from which you want to remove an owner. +- `entity_type`: The type of the entity from which you want to remove an owner. +- `owner`: The owner that you want to remove from an entity. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +""" +function remove_entity_owner( + domainIdentifier, + entityIdentifier, + entityType, + owner; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/removeOwner", + Dict{String,Any}("owner" => owner, "clientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function remove_entity_owner( + domainIdentifier, + entityIdentifier, + entityType, + owner, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/entities/$(entityType)/$(entityIdentifier)/removeOwner", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("owner" => owner, "clientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + remove_policy_grant(domain_identifier, entity_identifier, entity_type, policy_type, principal) + remove_policy_grant(domain_identifier, entity_identifier, entity_type, policy_type, principal, params::Dict{String,<:Any}) + +Removes a policy grant. 
+ +# Arguments +- `domain_identifier`: The ID of the domain where you want to remove a policy grant. +- `entity_identifier`: The ID of the entity from which you want to remove a policy grant. +- `entity_type`: The type of the entity from which you want to remove a policy grant. +- `policy_type`: The type of the policy that you want to remove. +- `principal`: The principal from which you want to remove a policy grant. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that is provided to ensure the + idempotency of the request. +""" +function remove_policy_grant( + domainIdentifier, + entityIdentifier, + entityType, + policyType, + principal; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/policies/managed/$(entityType)/$(entityIdentifier)/removeGrant", + Dict{String,Any}( + "policyType" => policyType, + "principal" => principal, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function remove_policy_grant( + domainIdentifier, + entityIdentifier, + entityType, + policyType, + principal, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "POST", + "/v2/domains/$(domainIdentifier)/policies/managed/$(entityType)/$(entityIdentifier)/removeGrant", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "policyType" => policyType, + "principal" => principal, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ revoke_subscription(domain_identifier, identifier) revoke_subscription(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -4796,6 +5978,52 @@ function untag_resource( ) end +""" + update_asset_filter(asset_identifier, domain_identifier, identifier) + update_asset_filter(asset_identifier, domain_identifier, identifier, params::Dict{String,<:Any}) + +Updates an asset filter. + +# Arguments +- `asset_identifier`: The ID of the data asset. +- `domain_identifier`: The ID of the domain where you want to update an asset filter. +- `identifier`: The ID of the asset filter. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"configuration"`: The configuration of the asset filter. +- `"description"`: The description of the asset filter. +- `"name"`: The name of the asset filter. 
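+
+# Example
+An illustrative update with hypothetical identifiers; only the fields passed in
+`params` are changed:
+```julia
+update_asset_filter(
+    "asset_example",
+    "dzd_exampledomain",
+    "flt_example",
+    Dict("description" => "Filters out rows that contain PII"),
+)
+```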
+""" +function update_asset_filter( + assetIdentifier, + domainIdentifier, + identifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_asset_filter( + assetIdentifier, + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PATCH", + "/v2/domains/$(domainIdentifier)/assets/$(assetIdentifier)/filters/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_data_source(domain_identifier, identifier) update_data_source(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -4892,6 +6120,46 @@ function update_domain( ) end +""" + update_domain_unit(domain_identifier, identifier) + update_domain_unit(domain_identifier, identifier, params::Dict{String,<:Any}) + +Updates the domain unit. + +# Arguments +- `domain_identifier`: The ID of the domain where you want to update a domain unit. +- `identifier`: The ID of the domain unit that you want to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the domain unit that you want to update. +- `"name"`: The name of the domain unit that you want to update. +""" +function update_domain_unit( + domainIdentifier, identifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/domain-units/$(identifier)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_domain_unit( + domainIdentifier, + identifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return datazone( + "PUT", + "/v2/domains/$(domainIdentifier)/domain-units/$(identifier)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_environment(domain_identifier, identifier) update_environment(domain_identifier, identifier, params::Dict{String,<:Any}) @@ -5175,8 +6443,8 @@ end Updates the specified project in Amazon DataZone. # Arguments -- `domain_identifier`: The identifier of the Amazon DataZone domain in which a project is - to be updated. +- `domain_identifier`: The ID of the Amazon DataZone domain where a project is being + updated. - `identifier`: The identifier of the project that is to be updated. # Optional Parameters diff --git a/src/services/deadline.jl b/src/services/deadline.jl index 61902b502a..e16073753b 100644 --- a/src/services/deadline.jl +++ b/src/services/deadline.jl @@ -561,7 +561,9 @@ Creates a budget to set spending thresholds for your rendering activity. # Arguments - `actions`: The budget actions to specify what happens when the budget runs out. - `approximate_dollar_limit`: The dollar limit based on consumed usage. -- `display_name`: The display name of the budget. +- `display_name`: The display name of the budget. This field can store any content. Escape + or encode this content before displaying it on a webpage or any other system that might + interpret the content of this field. - `farm_id`: The farm ID to include in this budget. - `schedule`: The schedule to associate with this budget. - `usage_tracking_resource`: The queue ID provided to this budget to track usage. 
@@ -570,7 +572,9 @@ Creates a budget to set spending thresholds for your rendering activity. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of the same request. -- `"description"`: The description of the budget. +- `"description"`: The description of the budget. This field can store any content. Escape + or encode this content before displaying it on a webpage or any other system that might + interpret the content of this field. """ function create_budget( actions, @@ -638,13 +642,17 @@ budgets and allow you to enforce permissions. Deadline Cloud farms are a useful for large projects. # Arguments -- `display_name`: The display name of the farm. +- `display_name`: The display name of the farm. This field can store any content. Escape + or encode this content before displaying it on a webpage or any other system that might + interpret the content of this field. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of the same request. -- `"description"`: The description of the farm. +- `"description"`: The description of the farm. This field can store any content. Escape + or encode this content before displaying it on a webpage or any other system that might + interpret the content of this field. - `"kmsKeyArn"`: The ARN of the KMS key to use on the farm. - `"tags"`: The tags to add to your farm. Each tag consists of a tag key and a tag value. Tag keys and values are both required, but tag values can be empty strings. @@ -693,7 +701,9 @@ managed by Deadline Cloud. # Arguments - `configuration`: The configuration settings for the fleet. Customer managed fleets are self-managed. Service managed Amazon EC2 fleets are managed by Deadline Cloud. -- `display_name`: The display name of the fleet. +- `display_name`: The display name of the fleet. This field can store any content. Escape + or encode this content before displaying it on a webpage or any other system that might + interpret the content of this field. - `farm_id`: The farm ID of the farm to connect to the fleet. - `max_worker_count`: The maximum number of workers for the fleet. - `role_arn`: The IAM role ARN for the role that the fleet's workers will use. @@ -702,7 +712,9 @@ managed by Deadline Cloud. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of the same request. -- `"description"`: The description of the fleet. +- `"description"`: The description of the fleet. This field can store any content. Escape + or encode this content before displaying it on a webpage or any other system that might + interpret the content of this field. - `"minWorkerCount"`: The minimum number of workers for the fleet. - `"tags"`: Each tag consists of a tag key and a tag value. Tag keys and values are both required, but tag values can be empty strings. @@ -763,12 +775,13 @@ end create_job(farm_id, priority, queue_id, template, template_type) create_job(farm_id, priority, queue_id, template, template_type, params::Dict{String,<:Any}) -Creates a job. A job is a render submission submitted by a user. It contains specific job -properties outlined as steps and tasks. +Creates a job. 
A job is a set of instructions that AWS Deadline Cloud uses to schedule and +run work on available workers. For more information, see Deadline Cloud jobs. # Arguments - `farm_id`: The farm ID of the farm to connect to the job. -- `priority`: The priority of the job on a scale of 1 to 100. The highest priority is 1. +- `priority`: The priority of the job on a scale of 0 to 100. The highest priority (first + scheduled) is 100. When two jobs have the same priority, the oldest job is scheduled first. - `queue_id`: The ID of the queue that the job is submitted to. - `template`: The job template to use for this job. - `template_type`: The file type for the job template. @@ -781,12 +794,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a render job. - `"maxFailedTasksCount"`: The number of task failures before the job stops running and is marked as FAILED. -- `"maxRetriesPerTask"`: The maximum number of retries for a job. +- `"maxRetriesPerTask"`: The maximum number of retries for each task. - `"parameters"`: The parameters for the job. - `"storageProfileId"`: The storage profile ID for the storage profile to connect to the job. -- `"targetTaskRunStatus"`: The initial status of the job's tasks when they are created. - Tasks that are created with a SUSPENDED status will not run until you update their status. +- `"targetTaskRunStatus"`: The initial job status when it is created. Jobs that are created + with a SUSPENDED status will not run until manually requeued. """ function create_job( farmId, @@ -910,7 +923,9 @@ steps that make up the job, and then download the job's results. # Arguments - `display_name`: The name that you give the monitor that is displayed in the Deadline - Cloud console. + Cloud console. This field can store any content. Escape or encode this content before + displaying it on a webpage or any other system that might interpret the content of this + field. - `identity_center_instance_arn`: The Amazon Resource Name (ARN) of the IAM Identity Center instance that authenticates monitor users. - `role_arn`: The Amazon Resource Name (ARN) of the IAM role that the monitor uses to @@ -982,7 +997,9 @@ Creates a queue to coordinate the order in which jobs run on a farm. A queue can specify where to pull resources and indicate where to output completed jobs. # Arguments -- `display_name`: The display name of the queue. +- `display_name`: The display name of the queue. This field can store any content. Escape + or encode this content before displaying it on a webpage or any other system that might + interpret the content of this field. - `farm_id`: The farm ID of the farm to connect to the queue. # Optional Parameters @@ -992,7 +1009,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"allowedStorageProfileIds"`: The storage profile IDs to include in the queue. - `"defaultBudgetAction"`: The default action to take on a queue if a budget isn't configured. -- `"description"`: The description of the queue. +- `"description"`: The description of the queue. This field can store any content. Escape + or encode this content before displaying it on a webpage or any other system that might + interpret the content of this field. - `"jobAttachmentSettings"`: The job attachment settings for the queue. These are the Amazon S3 bucket name and the Amazon S3 prefix. - `"jobRunAsUser"`: The jobs in the queue run as the specified POSIX user. 
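+
+# Example
+Several fields above note that they can store arbitrary content that should be escaped
+before display. A hypothetical Julia helper (not part of this API) illustrating one way
+to do that:
+```julia
+# Minimal HTML escaping for untrusted display names and descriptions (illustrative).
+escape_html(s::AbstractString) =
+    replace(s, "&" => "&amp;", "<" => "&lt;", ">" => "&gt;", "\"" => "&quot;")
+
+escape_html("<script>alert(1)</script>")  # returns "&lt;script&gt;alert(1)&lt;/script&gt;"
+```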
@@ -1159,7 +1178,9 @@ Creates a storage profile that specifies the operating system, file type, and fi of resources used on a farm. # Arguments -- `display_name`: The display name of the storage profile. +- `display_name`: The display name of the storage profile. This field can store any + content. Escape or encode this content before displaying it on a webpage or any other + system that might interpret the content of this field. - `farm_id`: The farm ID of the farm to connect to the storage profile. - `os_family`: The type of operating system (OS) for the storage profile. @@ -1473,7 +1494,8 @@ end delete_queue(farm_id, queue_id) delete_queue(farm_id, queue_id, params::Dict{String,<:Any}) -Deletes a queue. +Deletes a queue. You can't recover the jobs in a queue if you delete the queue. Deleting +the queue also deletes the jobs in that queue. # Arguments - `farm_id`: The ID of the farm from which to remove the queue. @@ -2635,7 +2657,9 @@ Lists fleets. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"displayName"`: The display names of a list of fleets. +- `"displayName"`: The display names of a list of fleets. This field can store any + content. Escape or encode this content before displaying it on a webpage or any other + system that might interpret the content of this field. - `"maxResults"`: The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. - `"nextToken"`: The token for the next set of results, or null to start from the beginning. @@ -2983,8 +3007,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"maxResults"`: The maximum number of results to return. Use this parameter with NextToken to get results as a set of sequential pages. - `"nextToken"`: The token for the next set of results, or null to start from the beginning. -- `"principalId"`: The principal ID. This filter is only valid when using Nimble Studio - credentials and should match the user ID in the credentials of the caller. +- `"principalId"`: The principal IDs to include in the list of queues. - `"status"`: The status of the queues listed. ACTIVE–The queues are active. SCHEDULING–The queues are scheduling. SCHEDULING_BLOCKED–The queue scheduling is blocked for these queues. @@ -3718,8 +3741,11 @@ end start_sessions_statistics_aggregation(end_time, farm_id, group_by, resource_ids, start_time, statistics, params::Dict{String,<:Any}) Starts an asynchronous request for getting aggregated statistics about queues and farms. -Get the statistics using the GetSessionsStatisticsAggregation operation. Statistics are -available for 1 hour after you call the StartSessionsStatisticsAggregation operation. +Get the statistics using the GetSessionsStatisticsAggregation operation. You can only have +one running aggregation for your Deadline Cloud farm. Call the +GetSessionsStatisticsAggregation operation and check the status field to see if an +aggregation is running. Statistics are available for 1 hour after you call the +StartSessionsStatisticsAggregation operation. # Arguments - `end_time`: The Linux timestamp of the date and time that the statistics end. @@ -3881,8 +3907,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"actionsToRemove"`: The budget actions to remove from the budget. - `"approximateDollarLimit"`: The dollar limit to update on the budget. Based on consumed usage. 
-- `"description"`: The description of the budget to update. -- `"displayName"`: The display name of the budget to update. +- `"description"`: The description of the budget to update. This field can store any + content. Escape or encode this content before displaying it on a webpage or any other + system that might interpret the content of this field. +- `"displayName"`: The display name of the budget to update. This field can store any + content. Escape or encode this content before displaying it on a webpage or any other + system that might interpret the content of this field. - `"schedule"`: The schedule to update. - `"status"`: Updates the status of the budget. ACTIVE–The budget is being evaluated. INACTIVE–The budget is inactive. This can include Expired, Canceled, or deleted Deleted @@ -3927,8 +3957,12 @@ Updates a farm. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"description"`: The description of the farm to update. -- `"displayName"`: The display name of the farm to update. +- `"description"`: The description of the farm to update. This field can store any + content. Escape or encode this content before displaying it on a webpage or any other + system that might interpret the content of this field. +- `"displayName"`: The display name of the farm to update. This field can store any + content. Escape or encode this content before displaying it on a webpage or any other + system that might interpret the content of this field. """ function update_farm(farmId; aws_config::AbstractAWSConfig=global_aws_config()) return deadline( @@ -3965,8 +3999,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of the same request. - `"configuration"`: The fleet configuration to update. -- `"description"`: The description of the fleet to update. -- `"displayName"`: The display name of the fleet to update. +- `"description"`: The description of the fleet to update. This field can store any + content. Escape or encode this content before displaying it on a webpage or any other + system that might interpret the content of this field. +- `"displayName"`: The display name of the fleet to update. This field can store any + content. Escape or encode this content before displaying it on a webpage or any other + system that might interpret the content of this field. - `"maxWorkerCount"`: The maximum number of workers in the fleet. - `"minWorkerCount"`: The minimum number of workers in the fleet. - `"roleArn"`: The IAM role ARN that the fleet's workers assume while running jobs. @@ -4003,7 +4041,9 @@ end update_job(farm_id, job_id, queue_id) update_job(farm_id, job_id, queue_id, params::Dict{String,<:Any}) -Updates a job. +Updates a job. When you change the status of the job to ARCHIVED, the job can't be +scheduled or archived. An archived jobs and its steps and tasks are deleted after 120 +days. The job can't be recovered. # Arguments - `farm_id`: The farm ID of the job to update. @@ -4014,7 +4054,9 @@ Updates a job. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of the same request. -- `"lifecycleStatus"`: The status of a job in its lifecycle. +- `"lifecycleStatus"`: The status of a job in its lifecycle. When you change the status of + the job to ARCHIVED, the job can't be scheduled or archived. 
An archived jobs and its + steps and tasks are deleted after 120 days. The job can't be recovered. - `"maxFailedTasksCount"`: The number of task failures before the job stops running and is marked as FAILED. - `"maxRetriesPerTask"`: The maximum number of retries for a job. @@ -4064,7 +4106,9 @@ settings when you call UpdateMonitor. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"displayName"`: The new value to use for the monitor's display name. +- `"displayName"`: The new value to use for the monitor's display name. This field can + store any content. Escape or encode this content before displaying it on a webpage or any + other system that might interpret the content of this field. - `"roleArn"`: The Amazon Resource Name (ARN) of the new IAM role to use with the monitor. - `"subdomain"`: The new value of the subdomain to use when forming the monitor URL. """ @@ -4107,8 +4151,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"allowedStorageProfileIdsToRemove"`: The storage profile ID to remove. - `"defaultBudgetAction"`: The default action to take for a queue update if a budget isn't configured. -- `"description"`: The description of the queue to update. -- `"displayName"`: The display name of the queue to update. +- `"description"`: The description of the queue to update. This field can store any + content. Escape or encode this content before displaying it on a webpage or any other + system that might interpret the content of this field. +- `"displayName"`: The display name of the queue to update. This field can store any + content. Escape or encode this content before displaying it on a webpage or any other + system that might interpret the content of this field. - `"jobAttachmentSettings"`: The job attachment settings to update for the queue. - `"jobRunAsUser"`: Update the jobs in the queue to run as a specified POSIX user. - `"requiredFileSystemLocationNamesToAdd"`: The required file system location names to add @@ -4378,7 +4426,9 @@ Updates a storage profile. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"X-Amz-Client-Token"`: The unique token which the server uses to recognize retries of the same request. -- `"displayName"`: The display name of the storage profile to update. +- `"displayName"`: The display name of the storage profile to update. This field can store + any content. Escape or encode this content before displaying it on a webpage or any other + system that might interpret the content of this field. - `"fileSystemLocationsToAdd"`: The file system location names to add. - `"fileSystemLocationsToRemove"`: The file system location names to remove. - `"osFamily"`: The OS system to update. diff --git a/src/services/device_farm.jl b/src/services/device_farm.jl index 975d3a3742..866fd97399 100644 --- a/src/services/device_farm.jl +++ b/src/services/device_farm.jl @@ -360,9 +360,8 @@ Uploads an app or test scripts. 
APPIUM_JAVA_TESTNG_TEST_PACKAGE APPIUM_PYTHON_TEST_PACKAGE APPIUM_NODE_TEST_PACKAGE APPIUM_RUBY_TEST_PACKAGE APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE APPIUM_WEB_PYTHON_TEST_PACKAGE - APPIUM_WEB_NODE_TEST_PACKAGE APPIUM_WEB_RUBY_TEST_PACKAGE CALABASH_TEST_PACKAGE - INSTRUMENTATION_TEST_PACKAGE UIAUTOMATION_TEST_PACKAGE UIAUTOMATOR_TEST_PACKAGE - XCTEST_TEST_PACKAGE XCTEST_UI_TEST_PACKAGE APPIUM_JAVA_JUNIT_TEST_SPEC + APPIUM_WEB_NODE_TEST_PACKAGE APPIUM_WEB_RUBY_TEST_PACKAGE INSTRUMENTATION_TEST_PACKAGE + XCTEST_TEST_PACKAGE XCTEST_UI_TEST_PACKAGE APPIUM_JAVA_JUNIT_TEST_SPEC APPIUM_JAVA_TESTNG_TEST_SPEC APPIUM_PYTHON_TEST_SPEC APPIUM_NODE_TEST_SPEC APPIUM_RUBY_TEST_SPEC APPIUM_WEB_JAVA_JUNIT_TEST_SPEC APPIUM_WEB_JAVA_TESTNG_TEST_SPEC APPIUM_WEB_PYTHON_TEST_SPEC APPIUM_WEB_NODE_TEST_SPEC APPIUM_WEB_RUBY_TEST_SPEC @@ -868,11 +867,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"configuration"`: An object that contains information about the settings for a run. - `"test"`: Information about the uploaded test to be run against the device pool. - `"testType"`: The test type for the specified device pool. Allowed values include the - following: BUILTIN_FUZZ. BUILTIN_EXPLORER. For Android, an app explorer that traverses - an Android app, interacting with it and capturing screenshots at the same time. - APPIUM_JAVA_JUNIT. APPIUM_JAVA_TESTNG. APPIUM_PYTHON. APPIUM_NODE. APPIUM_RUBY. - APPIUM_WEB_JAVA_JUNIT. APPIUM_WEB_JAVA_TESTNG. APPIUM_WEB_PYTHON. APPIUM_WEB_NODE. - APPIUM_WEB_RUBY. CALABASH. INSTRUMENTATION. UIAUTOMATION. UIAUTOMATOR. XCTEST. + following: BUILTIN_FUZZ. APPIUM_JAVA_JUNIT. APPIUM_JAVA_TESTNG. APPIUM_PYTHON. + APPIUM_NODE. APPIUM_RUBY. APPIUM_WEB_JAVA_JUNIT. APPIUM_WEB_JAVA_TESTNG. + APPIUM_WEB_PYTHON. APPIUM_WEB_NODE. APPIUM_WEB_RUBY. INSTRUMENTATION. XCTEST. XCTEST_UI. """ function get_device_pool_compatibility( @@ -2109,9 +2106,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys APPIUM_JAVA_TESTNG_TEST_PACKAGE APPIUM_PYTHON_TEST_PACKAGE APPIUM_NODE_TEST_PACKAGE APPIUM_RUBY_TEST_PACKAGE APPIUM_WEB_JAVA_JUNIT_TEST_PACKAGE APPIUM_WEB_JAVA_TESTNG_TEST_PACKAGE APPIUM_WEB_PYTHON_TEST_PACKAGE - APPIUM_WEB_NODE_TEST_PACKAGE APPIUM_WEB_RUBY_TEST_PACKAGE CALABASH_TEST_PACKAGE - INSTRUMENTATION_TEST_PACKAGE UIAUTOMATION_TEST_PACKAGE UIAUTOMATOR_TEST_PACKAGE - XCTEST_TEST_PACKAGE XCTEST_UI_TEST_PACKAGE APPIUM_JAVA_JUNIT_TEST_SPEC + APPIUM_WEB_NODE_TEST_PACKAGE APPIUM_WEB_RUBY_TEST_PACKAGE INSTRUMENTATION_TEST_PACKAGE + XCTEST_TEST_PACKAGE XCTEST_UI_TEST_PACKAGE APPIUM_JAVA_JUNIT_TEST_SPEC APPIUM_JAVA_TESTNG_TEST_SPEC APPIUM_PYTHON_TEST_SPEC APPIUM_NODE_TEST_SPEC APPIUM_RUBY_TEST_SPEC APPIUM_WEB_JAVA_JUNIT_TEST_SPEC APPIUM_WEB_JAVA_TESTNG_TEST_SPEC APPIUM_WEB_PYTHON_TEST_SPEC APPIUM_WEB_NODE_TEST_SPEC APPIUM_WEB_RUBY_TEST_SPEC diff --git a/src/services/direct_connect.jl b/src/services/direct_connect.jl index 2fe23f9070..1fb6be1354 100644 --- a/src/services/direct_connect.jl +++ b/src/services/direct_connect.jl @@ -69,7 +69,7 @@ end allocate_connection_on_interconnect(bandwidth, connection_name, interconnect_id, owner_account, vlan) allocate_connection_on_interconnect(bandwidth, connection_name, interconnect_id, owner_account, vlan, params::Dict{String,<:Any}) -Deprecated. Use AllocateHostedConnection instead. Creates a hosted connection on an + Deprecated. Use AllocateHostedConnection instead. Creates a hosted connection on an interconnect. 
Allocates a VLAN number and
a specified amount of bandwidth for use by a hosted connection on the specified
interconnect. Intended for use by Direct Connect Partners only.
@@ -149,9 +149,9 @@ Intended for use by Direct Connect Partners only.

# Arguments
- `bandwidth`: The bandwidth of the connection. The possible values are 50Mbps, 100Mbps,
-  200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, and 10Gbps. Note that only those
-  Direct Connect Partners who have met specific requirements are allowed to create a 1Gbps,
-  2Gbps, 5Gbps or 10Gbps hosted connection.
+  200Mbps, 300Mbps, 400Mbps, 500Mbps, 1Gbps, 2Gbps, 5Gbps, 10Gbps, and 25Gbps. Note that only
+  those Direct Connect Partners who have met specific requirements are allowed to create a
+  1Gbps, 2Gbps, 5Gbps, 10Gbps, or 25Gbps hosted connection.
- `connection_id`: The ID of the interconnect or LAG.
- `connection_name`: The name of the hosted connection.
- `owner_account`: The ID of the Amazon Web Services account ID of the customer for the
@@ -1098,7 +1098,7 @@ the VLAN assigned to them by the Direct Connect Partner. Intended for use by Di
Connect Partners only.

# Arguments
-- `bandwidth`: The port bandwidth, in Gbps. The possible values are 1 and 10.
+- `bandwidth`: The port bandwidth, in Gbps. The possible values are 1, 10, and 100.
- `interconnect_name`: The name of the interconnect.
- `location`: The location of the interconnect.

@@ -1155,28 +1155,28 @@ Creates a link aggregation group (LAG) with the specified number of bundled phys
dedicated connections between the customer network and a specific Direct Connect location.
A LAG is a logical interface that uses the Link Aggregation Control Protocol (LACP) to
aggregate multiple interfaces, enabling you to treat them as a single interface. All
-connections in a LAG must use the same bandwidth (either 1Gbps or 10Gbps) and must
-terminate at the same Direct Connect endpoint. You can have up to 10 dedicated connections
-per LAG. Regardless of this limit, if you request more connections for the LAG than Direct
-Connect can allocate on a single endpoint, no LAG is created. You can specify an existing
-physical dedicated connection or interconnect to include in the LAG (which counts towards
-the total number of connections). Doing so interrupts the current physical dedicated
-connection, and re-establishes them as a member of the LAG. The LAG will be created on the
-same Direct Connect endpoint to which the dedicated connection terminates. Any virtual
-interfaces associated with the dedicated connection are automatically disassociated and
-re-associated with the LAG. The connection ID does not change. If the Amazon Web Services
-account used to create a LAG is a registered Direct Connect Partner, the LAG is
-automatically enabled to host sub-connections. For a LAG owned by a partner, any associated
-virtual interfaces cannot be directly configured.
+connections in a LAG must use the same bandwidth (either 1Gbps, 10Gbps, 100Gbps, or
+400Gbps) and must terminate at the same Direct Connect endpoint. You can have up to 10
+dedicated connections per location. Regardless of this limit, if you request more
+connections for the LAG than Direct Connect can allocate on a single endpoint, no LAG is
+created. You can specify an existing physical dedicated connection or interconnect to
+include in the LAG (which counts towards the total number of connections). Doing so
+interrupts the current physical dedicated connection, and re-establishes them as a member
+of the LAG.
The LAG will be created on the same Direct Connect endpoint to which the
+dedicated connection terminates. Any virtual interfaces associated with the dedicated
+connection are automatically disassociated and re-associated with the LAG. The connection
+ID does not change. If the Amazon Web Services account used to create a LAG is a registered
+Direct Connect Partner, the LAG is automatically enabled to host sub-connections. For a LAG
+owned by a partner, any associated virtual interfaces cannot be directly configured.

# Arguments
- `connections_bandwidth`: The bandwidth of the individual physical dedicated connections
-  bundled by the LAG. The possible values are 1Gbps and 10Gbps.
+  bundled by the LAG. The possible values are 1Gbps, 10Gbps, 100Gbps, and 400Gbps.
- `lag_name`: The name of the LAG.
- `location`: The location for the LAG.
- `number_of_connections`: The number of physical dedicated connections initially
  provisioned and bundled by the LAG. You can have a maximum of four connections when the
-  port speed is 1G or 10G, or two when the port speed is 100G.
+  port speed is 1Gbps or 10Gbps, or two when the port speed is 100Gbps or 400Gbps.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -1245,7 +1245,7 @@ gateway or a Virtual Private Gateway (VGW). Connecting the private virtual inter
Direct Connect gateway enables the possibility for connecting to multiple VPCs, including
VPCs in different Amazon Web Services Regions. Connecting the private virtual interface to
a VGW only provides access to a single VPC within the same Region. Setting the MTU of a
-virtual interface to 9001 (jumbo frames) can cause an update to the underlying physical
+virtual interface to 8500 (jumbo frames) can cause an update to the underlying physical
connection if it wasn't updated to support jumbo frames. Updating the connection disrupts
network connectivity for all virtual interfaces associated with the connection for up to
30 seconds. To check whether your connection supports jumbo frames, call DescribeConnections.
@@ -1691,7 +1691,7 @@ end
    describe_connection_loa(connection_id)
    describe_connection_loa(connection_id, params::Dict{String,<:Any})

-Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for a connection. The Letter of
+ Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for a connection. The Letter of
Authorization - Connecting Facility Assignment (LOA-CFA) is a document that your APN
partner or service provider uses when establishing your cross connect to Amazon Web
Services at the colocation facility. For more information, see Requesting Cross Connects at
@@ -1763,7 +1763,7 @@ end
    describe_connections_on_interconnect(interconnect_id)
    describe_connections_on_interconnect(interconnect_id, params::Dict{String,<:Any})

-Deprecated. Use DescribeHostedConnections instead. Lists the connections that have been
+ Deprecated. Use DescribeHostedConnections instead. Lists the connections that have been
provisioned on the specified interconnect. Intended for use by Direct Connect Partners
only.

@@ -2017,7 +2017,7 @@ end
    describe_interconnect_loa(interconnect_id)
    describe_interconnect_loa(interconnect_id, params::Dict{String,<:Any})

-Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for the specified interconnect. The
+ Deprecated. Use DescribeLoa instead. Gets the LOA-CFA for the specified interconnect.
The
Letter of Authorization - Connecting Facility Assignment (LOA-CFA) is a document that is
used when establishing your cross connect to Amazon Web Services at the colocation facility.
For more information, see Requesting Cross Connects at Direct Connect Locations
@@ -2252,8 +2252,10 @@ end
    describe_virtual_gateways()
    describe_virtual_gateways(params::Dict{String,<:Any})

-Lists the virtual private gateways owned by the Amazon Web Services account. You can create
-one or more Direct Connect private virtual interfaces linked to a virtual private gateway.
+ Deprecated. Use DescribeVpnGateways instead. See DescribeVPNGateways in the Amazon Elastic
+Compute Cloud API Reference. Lists the virtual private gateways owned by the Amazon Web
+Services account. You can create one or more Direct Connect private virtual interfaces
+linked to a virtual private gateway.
"""

function describe_virtual_gateways(; aws_config::AbstractAWSConfig=global_aws_config())
@@ -2773,7 +2775,7 @@ end
    update_virtual_interface_attributes(virtual_interface_id, params::Dict{String,<:Any})

Updates the specified attributes of the specified virtual private interface. Setting the
-MTU of a virtual interface to 9001 (jumbo frames) can cause an update to the underlying
+MTU of a virtual interface to 8500 (jumbo frames) can cause an update to the underlying
physical connection if it wasn't updated to support jumbo frames. Updating the connection
disrupts network connectivity for all virtual interfaces associated with the connection for
up to 30 seconds. To check whether your connection supports jumbo frames, call
DescribeVirtualInterfaces.
@@ -2787,7 +2789,7 @@ DescribeVirtualInterfaces.
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"enableSiteLink"`: Indicates whether to enable or disable SiteLink.
- `"mtu"`: The maximum transmission unit (MTU), in bytes. The supported values are 1500 and
-  9001. The default value is 1500.
+  8500. The default value is 1500.
- `"virtualInterfaceName"`: The name of the virtual private interface.
"""
function update_virtual_interface_attributes(
diff --git a/src/services/docdb.jl b/src/services/docdb.jl
index 373f9fb640..2627b5caca 100644
--- a/src/services/docdb.jl
+++ b/src/services/docdb.jl
@@ -1903,6 +1903,78 @@ function failover_dbcluster(
)
end

+"""
+    failover_global_cluster(global_cluster_identifier, target_db_cluster_identifier)
+    failover_global_cluster(global_cluster_identifier, target_db_cluster_identifier, params::Dict{String,<:Any})
+
+Promotes the specified secondary DB cluster to be the primary DB cluster in the global
+cluster when failing over a global cluster occurs. Use this operation to respond to an
+unplanned event, such as a regional disaster in the primary region. Failing over can result
+in a loss of write transaction data that wasn't replicated to the chosen secondary before
+the failover event occurred. However, the recovery process that promotes a DB instance on
+the chosen secondary DB cluster to be the primary writer DB instance guarantees that the
+data is in a transactionally consistent state.
+
+# Arguments
+- `global_cluster_identifier`: The identifier of the Amazon DocumentDB global cluster to
+  apply this operation. The identifier is the unique key assigned by the user when the
+  cluster is created. In other words, it's the name of the global cluster. Constraints:
+  Must match the identifier of an existing global cluster. Minimum length of 1. Maximum
+  length of 255.
Pattern: [A-Za-z][0-9A-Za-z-:._]*
+- `target_db_cluster_identifier`: The identifier of the secondary Amazon DocumentDB cluster
+  that you want to promote to the primary for the global cluster. Use the Amazon Resource
+  Name (ARN) for the identifier so that Amazon DocumentDB can locate the cluster in its
+  Amazon Web Services region. Constraints: Must match the identifier of an existing
+  secondary cluster. Minimum length of 1. Maximum length of 255. Pattern:
+  [A-Za-z][0-9A-Za-z-:._]*
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"AllowDataLoss"`: Specifies whether to allow data loss for this global cluster
+  operation. Allowing data loss triggers a global failover operation. If you don't specify
+  AllowDataLoss, the global cluster operation defaults to a switchover. Constraints: Can't
+  be specified together with the Switchover parameter.
+- `"Switchover"`: Specifies whether to switch over this global database cluster.
+  Constraints: Can't be specified together with the AllowDataLoss parameter.
+"""
+function failover_global_cluster(
+    GlobalClusterIdentifier,
+    TargetDbClusterIdentifier;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return docdb(
+        "FailoverGlobalCluster",
+        Dict{String,Any}(
+            "GlobalClusterIdentifier" => GlobalClusterIdentifier,
+            "TargetDbClusterIdentifier" => TargetDbClusterIdentifier,
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function failover_global_cluster(
+    GlobalClusterIdentifier,
+    TargetDbClusterIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return docdb(
+        "FailoverGlobalCluster",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "GlobalClusterIdentifier" => GlobalClusterIdentifier,
+                    "TargetDbClusterIdentifier" => TargetDbClusterIdentifier,
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     list_tags_for_resource(resource_name)
     list_tags_for_resource(resource_name, params::Dict{String,<:Any})
diff --git a/src/services/dynamodb.jl b/src/services/dynamodb.jl
index c7c65bfb39..9fc08a5e46 100644
--- a/src/services/dynamodb.jl
+++ b/src/services/dynamodb.jl
@@ -11,11 +11,11 @@ using AWS.UUIDs

This operation allows you to perform batch reads or writes on data stored in DynamoDB,
using PartiQL. Each read statement in a BatchExecuteStatement must specify an equality
condition on all key attributes. This enforces that each SELECT statement in a batch
-returns at most a single item. The entire batch must consist of either read statements or
-write statements, you cannot mix both in one batch. A HTTP 200 response does not mean
-that all statements in the BatchExecuteStatement succeeded. Error details for individual
-statements can be found under the Error field of the BatchStatementResponse for each
-statement.
+returns at most a single item. For more information, see Running batch operations with
+PartiQL for DynamoDB. The entire batch must consist of either read statements or write
+statements; you cannot mix both in one batch. An HTTP 200 response does not mean that all
+statements in the BatchExecuteStatement succeeded. Error details for individual statements
+can be found under the Error field of the BatchStatementResponse for each statement.

# Arguments
- `statements`: The list of PartiQL statements representing the batch to run.
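The failure semantics described in the docstring above are easy to get wrong: an HTTP 200
response only means the batch was accepted. The following is a minimal, illustrative sketch
(not part of this patch) of checking per-statement results through AWS.jl's `@service`
interface; the `Music` table and its key values are hypothetical, and the response is
assumed to parse to a Dict.

using AWS
@service Dynamodb

# Hypothetical table and key values; each SELECT pins down the full primary
# key, so every statement returns at most one item.
statements = [
    Dict(
        "Statement" => "SELECT * FROM Music WHERE Artist = ? AND SongTitle = ?",
        "Parameters" => [Dict("S" => "Acme Band"), Dict("S" => "Happy Day")],
    ),
]

resp = Dynamodb.batch_execute_statement(statements)

# An HTTP 200 response does not mean every statement succeeded; inspect the
# Error field of each BatchStatementResponse.
for r in get(resp, "Responses", [])
    haskey(r, "Error") && @warn "PartiQL statement failed" error = r["Error"]
end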
@@ -165,9 +165,12 @@ internal processing failure occurs, the failed operations are returned in the UnprocessedItems response parameter. You can investigate and optionally resend the requests. Typically, you would call BatchWriteItem in a loop. Each iteration would check for unprocessed items and submit a new BatchWriteItem request with those unprocessed items -until all items have been processed. If none of the items can be processed due to -insufficient provisioned throughput on all of the tables in the request, then -BatchWriteItem returns a ProvisionedThroughputExceededException. If DynamoDB returns any +until all items have been processed. For tables and indexes with provisioned capacity, if +none of the items can be processed due to insufficient provisioned throughput on all of the +tables in the request, then BatchWriteItem returns a +ProvisionedThroughputExceededException. For all tables and indexes, if none of the items +can be processed due to other throttling scenarios (such as exceeding partition level +limits), then BatchWriteItem returns a ThrottlingException. If DynamoDB returns any unprocessed items, you should retry the batch operation on those items. However, we strongly recommend that you use an exponential backoff algorithm. If you retry the batch operation immediately, the underlying read or write requests can still fail due to @@ -733,11 +736,11 @@ does not exist, DynamoDB returns a ResourceNotFoundException. If table is alread DELETING state, no error is returned. For global tables, this operation only applies to global tables using Version 2019.11.21 (Current version). DynamoDB might continue to accept data read and write operations, such as GetItem and PutItem, on a table in the -DELETING state until the table deletion is complete. When you delete a table, any indexes -on that table are also deleted. If you have DynamoDB Streams enabled on the table, then the -corresponding stream on that table goes into the DISABLED state, and the stream is -automatically deleted after 24 hours. Use the DescribeTable action to check the status of -the table. +DELETING state until the table deletion is complete. For the full list of table states, see +TableStatus. When you delete a table, any indexes on that table are also deleted. If you +have DynamoDB Streams enabled on the table, then the corresponding stream on that table +goes into the DISABLED state, and the stream is automatically deleted after 24 hours. Use +the DescribeTable action to check the status of the table. # Arguments - `table_name`: The name of the table to delete. You can also provide the Amazon Resource diff --git a/src/services/ec2.jl b/src/services/ec2.jl index 505cf5fd84..781d810bc5 100644 --- a/src/services/ec2.jl +++ b/src/services/ec2.jl @@ -397,6 +397,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys let Amazon EC2 select an address from the address pool. Alternatively, specify a specific address from the address pool. - `"Domain"`: The network (vpc). +- `"IpamPoolId"`: The ID of an IPAM pool. - `"NetworkBorderGroup"`: A unique set of Availability Zones, Local Zones, or Wavelength Zones from which Amazon Web Services advertises IP addresses. Use this parameter to limit the IP address to this location. IP addresses cannot move between network border groups. @@ -2811,21 +2812,15 @@ end copy_image(name, source_image_id, source_region) copy_image(name, source_image_id, source_region, params::Dict{String,<:Any}) -Initiates the copy of an AMI. 
You can copy an AMI from one Region to another, or from a +Initiates an AMI copy operation. You can copy an AMI from one Region to another, or from a Region to an Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost to another, or within the same Outpost. To copy an AMI to another partition, see -CreateStoreImageTask. To copy an AMI from one Region to another, specify the source Region -using the SourceRegion parameter, and specify the destination Region using its endpoint. -Copies of encrypted backing snapshots for the AMI are encrypted. Copies of unencrypted -backing snapshots remain unencrypted, unless you set Encrypted during the copy operation. -You cannot create an unencrypted copy of an encrypted backing snapshot. To copy an AMI from -a Region to an Outpost, specify the source Region using the SourceRegion parameter, and -specify the ARN of the destination Outpost using DestinationOutpostArn. Backing snapshots -copied to an Outpost are encrypted by default using the default encryption key for the -Region, or a different key that you specify in the request using KmsKeyId. Outposts do not -support unencrypted snapshots. For more information, Amazon EBS local snapshots on -Outposts in the Amazon EBS User Guide. For more information about the prerequisites and -limits when copying an AMI, see Copy an AMI in the Amazon EC2 User Guide. +CreateStoreImageTask. When you copy an AMI from one Region to another, the destination +Region is the current Region. When you copy an AMI from a Region to an Outpost, specify the +ARN of the Outpost as the destination. Backing snapshots copied to an Outpost are encrypted +by default using the default encryption key for the Region or the key that you specify. +Outposts do not support unencrypted snapshots. For information about the prerequisites when +copying an AMI, see Copy an AMI in the Amazon EC2 User Guide. # Arguments - `name`: The name of the new AMI in the destination Region. @@ -2859,7 +2854,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys encrypted. You can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted copy of an encrypted snapshot. The default KMS key for Amazon EBS is used unless you specify a non-default Key Management Service (KMS) KMS key using KmsKeyId. For - more information, see Amazon EBS encryption in the Amazon EBS User Guide. + more information, see Use encryption with EBS-backed AMIs in the Amazon EC2 User Guide. - `"kmsKeyId"`: The identifier of the symmetric Key Management Service (KMS) KMS key to use when creating encrypted volumes. If this parameter is not specified, your Amazon Web Services managed KMS key for Amazon EBS is used. If you specify a KMS key, you must also @@ -3132,6 +3127,69 @@ function create_capacity_reservation( ) end +""" + create_capacity_reservation_by_splitting(instance_count, source_capacity_reservation_id) + create_capacity_reservation_by_splitting(instance_count, source_capacity_reservation_id, params::Dict{String,<:Any}) + + Create a new Capacity Reservation by splitting the available capacity of the source +Capacity Reservation. The new Capacity Reservation will have the same attributes as the +source Capacity Reservation except for tags. The source Capacity Reservation must be active +and owned by your Amazon Web Services account. + +# Arguments +- `instance_count`: The number of instances to split from the source Capacity Reservation. 
+- `source_capacity_reservation_id`: The ID of the Capacity Reservation from which you want + to split the available capacity. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. For more information, see Ensure Idempotency. +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"TagSpecification"`: The tags to apply to the new Capacity Reservation. +""" +function create_capacity_reservation_by_splitting( + InstanceCount, + SourceCapacityReservationId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "CreateCapacityReservationBySplitting", + Dict{String,Any}( + "InstanceCount" => InstanceCount, + "SourceCapacityReservationId" => SourceCapacityReservationId, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_capacity_reservation_by_splitting( + InstanceCount, + SourceCapacityReservationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "CreateCapacityReservationBySplitting", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "InstanceCount" => InstanceCount, + "SourceCapacityReservationId" => SourceCapacityReservationId, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_capacity_reservation_fleet(instance_type_specification, total_target_capacity) create_capacity_reservation_fleet(instance_type_specification, total_target_capacity, params::Dict{String,<:Any}) @@ -4108,10 +4166,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"PreserveClientIp"`: Indicates whether your client's IP address is preserved as the - source. The value is true or false. If true, your client's IP address is used when you - connect to a resource. If false, the elastic network interface IP address is used when - you connect to a resource. Default: true +- `"PreserveClientIp"`: Indicates whether the client IP address is preserved as the source. + The following are the possible values. true - Use the client IP address as the source. + false - Use the network interface IP address as the source. Default: false - `"SecurityGroupId"`: One or more security groups to associate with the endpoint. If you don't specify a security group, the default security group for your VPC will be associated with the endpoint. @@ -4309,6 +4366,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"EnablePrivateGua"`: Enable this option to use your own GUA ranges as private IPv6 + addresses. 
This option is disabled by default. - `"OperatingRegion"`: The operating Regions for the IPAM. Operating Regions are Amazon Web Services Regions where the IPAM is allowed to manage IP address CIDRs. IPAM only discovers and monitors resources in the Amazon Web Services Regions you select as operating Regions. @@ -4343,6 +4402,54 @@ function create_ipam( ) end +""" + create_ipam_external_resource_verification_token(ipam_id) + create_ipam_external_resource_verification_token(ipam_id, params::Dict{String,<:Any}) + +Create a verification token. A verification token is an Amazon Web Services-generated +random value that you can use to prove ownership of an external resource. For example, you +can use a verification token to validate that you control a public IP address range when +you bring an IP address range to Amazon Web Services (BYOIP). + +# Arguments +- `ipam_id`: The ID of the IPAM that will create the token. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. For more information, see Ensuring idempotency. +- `"DryRun"`: A check for whether you have the required permissions for the action without + actually making the request and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"TagSpecification"`: Token tags. +""" +function create_ipam_external_resource_verification_token( + IpamId; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "CreateIpamExternalResourceVerificationToken", + Dict{String,Any}("IpamId" => IpamId, "ClientToken" => string(uuid4())); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_ipam_external_resource_verification_token( + IpamId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "CreateIpamExternalResourceVerificationToken", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("IpamId" => IpamId, "ClientToken" => string(uuid4())), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_ipam_pool(address_family, ipam_scope_id) create_ipam_pool(address_family, ipam_scope_id, params::Dict{String,<:Any}) @@ -4392,13 +4499,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"Locale"`: In IPAM, the locale is the Amazon Web Services Region where you want to make - an IPAM pool available for allocations. Only resources in the same Region as the locale of - the pool can get IP address allocations from the pool. You can only allocate a CIDR for a - VPC, for example, from an IPAM pool that shares a locale with the VPC’s Region. Note that - once you choose a Locale for a pool, you cannot modify it. If you do not choose a locale, - resources in Regions others than the IPAM's home region cannot use CIDRs from this pool. - Possible values: Any Amazon Web Services Region, such as us-east-1. 
+- `"Locale"`: The locale for the pool should be one of the following: An Amazon Web
+  Services Region where you want this IPAM pool to be available for allocations. The
+  network border group for an Amazon Web Services Local Zone where you want this IPAM pool to
+  be available for allocations (supported Local Zones). This option is only available for
+  IPAM IPv4 pools in the public scope. If you do not choose a locale, resources in Regions
+  other than the IPAM's home region cannot use CIDRs from this pool. Possible values: Any
+  Amazon Web Services Region or supported Amazon Web Services Local Zone.
- `"PublicIpSource"`: The IP address source for pools in the public scope. Only used for
  provisioning IP address CIDRs to pools in the public scope. Default is byoip. For more
  information, see Create IPv6 pools in the Amazon VPC IPAM User Guide. By default, you can
@@ -5450,7 +5557,7 @@ Services account only, and only one account at a time.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"AwsAccountId"`: The Amazon Web Services account ID.
-- `"AwsService"`: The Amazon Web Service. Currently not supported.
+- `"AwsService"`: The Amazon Web Services service. Currently not supported.
- `"DryRun"`: Checks whether you have the required permissions for the action, without
  actually making the request, and provides an error response. If you have the required
  permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
@@ -5546,6 +5653,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"DryRun"`: A check for whether you have the required permissions for the action without
  actually making the request and provides an error response. If you have the required
  permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
+- `"NetworkBorderGroup"`: The Availability Zone (AZ) or Local Zone (LZ) network border
+  group that the resource that the IP address is assigned to is in. Defaults to an AZ network
+  border group. For more information on available Local Zones, see Local Zone availability in
+  the Amazon EC2 User Guide.
- `"TagSpecification"`: The key/value combination of a tag assigned to the resource. Use
  the tag key in the filter name and the tag value as the filter value. For example, to find
  all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for
@@ -7549,7 +7660,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
  enable Multi-Attach, you can attach the volume to up to 16 Instances built on the Nitro
  System in the same Availability Zone. This parameter is supported with io1 and io2 volumes
  only. For more information, see Amazon EBS Multi-Attach in the Amazon EBS User Guide.
-- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost.
+- `"OutpostArn"`: The Amazon Resource Name (ARN) of the Outpost on which to create the
+  volume. If you intend to use a volume with an instance running on an outpost, then you must
+  create the volume on the same outpost as the instance. You can't use a volume created in an
+  Amazon Web Services Region with an instance on an Amazon Web Services outpost, or the other
+  way around.
- `"Size"`: The size of the volume, in GiBs. You must specify either a snapshot ID or a
  volume size. If you specify a snapshot, the default is the snapshot size. You can specify a
  volume size that is equal to or larger than the snapshot size.
The following are the @@ -8748,6 +8863,60 @@ function delete_ipam( ) end +""" + delete_ipam_external_resource_verification_token(ipam_external_resource_verification_token_id) + delete_ipam_external_resource_verification_token(ipam_external_resource_verification_token_id, params::Dict{String,<:Any}) + +Delete a verification token. A verification token is an Amazon Web Services-generated +random value that you can use to prove ownership of an external resource. For example, you +can use a verification token to validate that you control a public IP address range when +you bring an IP address range to Amazon Web Services (BYOIP). + +# Arguments +- `ipam_external_resource_verification_token_id`: The token ID. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: A check for whether you have the required permissions for the action without + actually making the request and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function delete_ipam_external_resource_verification_token( + IpamExternalResourceVerificationTokenId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "DeleteIpamExternalResourceVerificationToken", + Dict{String,Any}( + "IpamExternalResourceVerificationTokenId" => + IpamExternalResourceVerificationTokenId, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_ipam_external_resource_verification_token( + IpamExternalResourceVerificationTokenId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "DeleteIpamExternalResourceVerificationToken", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "IpamExternalResourceVerificationTokenId" => + IpamExternalResourceVerificationTokenId, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_ipam_pool(ipam_pool_id) delete_ipam_pool(ipam_pool_id, params::Dict{String,<:Any}) @@ -9693,6 +9862,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"NetworkBorderGroup"`: The Availability Zone (AZ) or Local Zone (LZ) network border + group that the resource that the IP address is assigned to is in. Defaults to an AZ network + border group. For more information on available Local Zones, see Local Zone availability in + the Amazon EC2 User Guide. """ function delete_public_ipv4_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -9844,8 +10017,8 @@ end delete_security_group(params::Dict{String,<:Any}) Deletes a security group. If you attempt to delete a security group that is associated with -an instance or network interface or is referenced by another security group, the operation -fails with DependencyViolation. +an instance or network interface or is referenced by another security group in the same +VPC, the operation fails with DependencyViolation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: @@ -11775,7 +11948,7 @@ source account starts the transfer, the transfer account has seven days to accep Elastic IP address transfer. During those seven days, the source account can view the pending transfer by using this action. After seven days, the transfer expires and ownership of the Elastic IP address returns to the source account. Accepted transfers are visible to -the source account for three days after the transfers have been accepted. +the source account for 14 days after the transfers have been accepted. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -14024,7 +14197,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys system-status.reachability - Filters on system status where the name is reachability (passed | failed | initializing | insufficient-data). system-status.status - The system status of the instance (ok | impaired | initializing | insufficient-data | not-applicable). - + attached-ebs-status.status - The status of the attached EBS volume for the instance (ok + | impaired | initializing | insufficient-data | not-applicable). - `"InstanceId"`: The instance IDs. Default: Describes all your instances. Constraints: Maximum 100 explicitly specified instance IDs. - `"MaxResults"`: The maximum number of items to return for this request. To get the next @@ -14555,6 +14729,49 @@ function describe_ipam_byoasn( ) end +""" + describe_ipam_external_resource_verification_tokens() + describe_ipam_external_resource_verification_tokens(params::Dict{String,<:Any}) + +Describe verification tokens. A verification token is an Amazon Web Services-generated +random value that you can use to prove ownership of an external resource. For example, you +can use a verification token to validate that you control a public IP address range when +you bring an IP address range to Amazon Web Services (BYOIP). + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"DryRun"`: A check for whether you have the required permissions for the action without + actually making the request and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"Filter"`: One or more filters for the request. For more information about filtering, + see Filtering CLI output. Available filters: ipam-arn + ipam-external-resource-verification-token-arn + ipam-external-resource-verification-token-id ipam-id ipam-region state + status token-name token-value +- `"IpamExternalResourceVerificationTokenId"`: Verification token IDs. +- `"MaxResults"`: The maximum number of tokens to return in one page of results. +- `"NextToken"`: The token for the next page of results. +""" +function describe_ipam_external_resource_verification_tokens(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeIpamExternalResourceVerificationTokens"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_ipam_external_resource_verification_tokens( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ec2( + "DescribeIpamExternalResourceVerificationTokens", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_ipam_pools() describe_ipam_pools(params::Dict{String,<:Any}) @@ -15620,7 +15837,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys network-interface-permission.network-interface-permission-id - The ID of the permission. network-interface-permission.network-interface-id - The ID of the network interface. network-interface-permission.aws-account-id - The Amazon Web Services account ID. - network-interface-permission.aws-service - The Amazon Web Service. + network-interface-permission.aws-service - The Amazon Web Services service. network-interface-permission.permission - The type of permission (INSTANCE-ATTACH | EIP-ASSOCIATE). - `"MaxResults"`: The maximum number of items to return for this request. To get the next @@ -15708,8 +15925,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys (IPv4). private-ip-address - The private IPv4 address or addresses of the network interface. requester-id - The alias or Amazon Web Services account ID of the principal or service that created the network interface. requester-managed - Indicates whether the - network interface is being managed by an Amazon Web Service (for example, Amazon Web - Services Management Console, Auto Scaling, and so on). source-dest-check - Indicates + network interface is being managed by an Amazon Web Services service (for example, Amazon + Web Services Management Console, Auto Scaling, and so on). source-dest-check - Indicates whether the network interface performs source/destination checking. A value of true means checking is enabled, and false means checking is disabled. The value must be false for the network interface to perform network address translation (NAT) in your VPC. status - @@ -15743,8 +15960,11 @@ end describe_placement_groups() describe_placement_groups(params::Dict{String,<:Any}) -Describes the specified placement groups or all of your placement groups. For more -information, see Placement groups in the Amazon EC2 User Guide. +Describes the specified placement groups or all of your placement groups. To describe a +specific placement group that is shared with your account, you must specify the ID of the +placement group using the GroupId parameter. Specifying the name of a shared placement +group using the GroupNames parameter will result in an error. For more information, see +Placement groups in the Amazon EC2 User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -15762,8 +15982,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"dryRun"`: Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. -- `"groupName"`: The names of the placement groups. Default: Describes all your placement - groups, or only those otherwise specified. +- `"groupName"`: The names of the placement groups. Constraints: You can specify a name + only if the placement group is owned by your account. If a placement group is shared with + your account, specifying the name results in an error. You must use the GroupId parameter + instead. """ function describe_placement_groups(; aws_config::AbstractAWSConfig=global_aws_config()) return ec2( @@ -16239,26 +16461,27 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the route table. route-table-id - The ID of the route table. route.destination-cidr-block - The IPv4 CIDR range specified in a route in the table. 
route.destination-ipv6-cidr-block - The IPv6 CIDR range specified in a route in the route - table. route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Service - specified in a route in the table. route.egress-only-internet-gateway-id - The ID of an - egress-only Internet gateway specified in a route in the route table. route.gateway-id - - The ID of a gateway specified in a route in the table. route.instance-id - The ID of an - instance specified in a route in the table. route.nat-gateway-id - The ID of a NAT - gateway. route.transit-gateway-id - The ID of a transit gateway. route.origin - - Describes how the route was created. CreateRouteTable indicates that the route was - automatically created when the route table was created; CreateRoute indicates that the - route was manually added to the route table; EnableVgwRoutePropagation indicates that the - route was propagated by route propagation. route.state - The state of a route in the - route table (active | blackhole). The blackhole state indicates that the route's target - isn't available (for example, the specified gateway isn't attached to the VPC, the - specified NAT instance has been terminated, and so on). route.vpc-peering-connection-id - - The ID of a VPC peering connection specified in a route in the table. tag:<key> - - The key/value combination of a tag assigned to the resource. Use the tag key in the - filter name and the tag value as the filter value. For example, to find all resources that - have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name - and TeamA for the filter value. tag-key - The key of a tag assigned to the resource. Use - this filter to find all resources assigned a tag with a specific key, regardless of the tag - value. vpc-id - The ID of the VPC for the route table. + table. route.destination-prefix-list-id - The ID (prefix) of the Amazon Web Services + service specified in a route in the table. route.egress-only-internet-gateway-id - The + ID of an egress-only Internet gateway specified in a route in the route table. + route.gateway-id - The ID of a gateway specified in a route in the table. + route.instance-id - The ID of an instance specified in a route in the table. + route.nat-gateway-id - The ID of a NAT gateway. route.transit-gateway-id - The ID of a + transit gateway. route.origin - Describes how the route was created. CreateRouteTable + indicates that the route was automatically created when the route table was created; + CreateRoute indicates that the route was manually added to the route table; + EnableVgwRoutePropagation indicates that the route was propagated by route propagation. + route.state - The state of a route in the route table (active | blackhole). The blackhole + state indicates that the route's target isn't available (for example, the specified gateway + isn't attached to the VPC, the specified NAT instance has been terminated, and so on). + route.vpc-peering-connection-id - The ID of a VPC peering connection specified in a route + in the table. tag:<key> - The key/value combination of a tag assigned to the + resource. Use the tag key in the filter name and the tag value as the filter value. For + example, to find all resources that have a tag with the key Owner and the value TeamA, + specify tag:Owner for the filter name and TeamA for the filter value. tag-key - The key + of a tag assigned to the resource. Use this filter to find all resources assigned a tag + with a specific key, regardless of the tag value. 
vpc-id - The ID of the VPC for the
+  route table.
- `"MaxResults"`: The maximum number of items to return for this request. To get the next
  page of items, make another request with the token returned in the output. For more
  information, see Pagination.
@@ -17033,9 +17256,9 @@ end
    describe_stale_security_groups(vpc_id, params::Dict{String,<:Any})

Describes the stale security group rules for security groups in a specified VPC. Rules are
-stale when they reference a deleted security group in the same VPC or peered VPC. Rules can
-also be stale if they reference a security group in a peer VPC for which the VPC peering
-connection has been deleted.
+stale when they reference a deleted security group in a peered VPC. Rules can also be stale
+if they reference a security group in a peer VPC for which the VPC peering connection has
+been deleted.

# Arguments
- `vpc_id`: The ID of the VPC.
@@ -18215,7 +18438,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
  assigned a tag with a specific key, regardless of the tag value.   volume-id - The volume
  ID.   volume-type - The Amazon EBS volume type (gp2 | gp3 | io1 | io2 | st1 | sc1|
  standard)
-- `"VolumeId"`: The volume IDs.
+- `"VolumeId"`: The volume IDs. If not specified, then all volumes are included in the
+  response.
- `"dryRun"`: Checks whether you have the required permissions for the action, without
  actually making the request, and provides an error response. If you have the required
  permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
@@ -18240,11 +18464,9 @@ end
    describe_volumes_modifications()
    describe_volumes_modifications(params::Dict{String,<:Any})

-Describes the most recent volume modification request for the specified EBS volumes. If a
-volume has never been modified, some information in the output will be null. If a volume
-has been modified more than once, the output includes only the most recent modification
-request. For more information, see Monitor the progress of volume modifications in the
-Amazon EBS User Guide.
+Describes the most recent volume modification request for the specified EBS volumes. For
+more information, see Monitor the progress of volume modifications in the Amazon EBS User
+Guide.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -19642,11 +19864,13 @@ end

Disables the block public access for snapshots setting at the account level for the
specified Amazon Web Services Region. After you disable block public access for snapshots
-in a Region, users can publicly share snapshots in that Region. If block public access is
-enabled in block-all-sharing mode, and you disable block public access, all snapshots that
-were previously publicly shared are no longer treated as private and they become publicly
-accessible again. For more information, see Block public access for snapshots in the
-Amazon EBS User Guide .
+in a Region, users can publicly share snapshots in that Region. Enabling block public
+access for snapshots in block-all-sharing mode does not change the permissions for
+snapshots that are already publicly shared. Instead, it prevents these snapshots from being
+publicly visible and publicly accessible. Therefore, the attributes for these snapshots
+still indicate that they are publicly shared, even though they are not publicly available.
+If you disable block public access, these snapshots will become publicly available again.
+For more information, see Block public access for snapshots in the Amazon EBS User Guide.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -21108,26 +21332,24 @@ Enables or modifies the block public access for snapshots setting at the account
the specified Amazon Web Services Region. After you enable block public access for
snapshots in a Region, users can no longer request public sharing for snapshots in that
Region. Snapshots that are already publicly shared are either treated as private or they
-remain publicly shared, depending on the State that you specify. If block public access is
-enabled in block-all-sharing mode, and you change the mode to block-new-sharing, all
-snapshots that were previously publicly shared are no longer treated as private and they
-become publicly accessible again. For more information, see Block public access for
-snapshots in the Amazon EBS User Guide.
+remain publicly shared, depending on the State that you specify. Enabling block public
+access for snapshots in block-all-sharing mode does not change the permissions for
+snapshots that are already publicly shared. Instead, it prevents these snapshots from being
+publicly visible and publicly accessible. Therefore, the attributes for these snapshots
+still indicate that they are publicly shared, even though they are not publicly available.
+If you later disable block public access or change the mode to block-new-sharing, these
+snapshots will become publicly available again. For more information, see Block public
+access for snapshots in the Amazon EBS User Guide.

# Arguments
- `state`: The mode in which to enable block public access for snapshots for the Region.
  Specify one of the following values:   block-all-sharing - Prevents all public sharing of
  snapshots in the Region. Users in the account will no longer be able to request new public
  sharing. Additionally, snapshots that are already publicly shared are treated as private
-  and they are no longer publicly available. If you enable block public access for snapshots
-  in block-all-sharing mode, it does not change the permissions for snapshots that are
-  already publicly shared. Instead, it prevents these snapshots from be publicly visible and
-  publicly accessible. Therefore, the attributes for these snapshots still indicate that they
-  are publicly shared, even though they are not publicly available.   block-new-sharing -
-  Prevents only new public sharing of snapshots in the Region. Users in the account will no
-  longer be able to request new public sharing. However, snapshots that are already publicly
-  shared, remain publicly available.   unblocked is not a valid value for
-  EnableSnapshotBlockPublicAccess.
+  and they are no longer publicly available.   block-new-sharing - Prevents only new public
+  sharing of snapshots in the Region. Users in the account will no longer be able to request
+  new public sharing. However, snapshots that are already publicly shared remain publicly
+  available.   unblocked is not a valid value for EnableSnapshotBlockPublicAccess.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -21818,13 +22040,8 @@ end

Gets the console output for the specified instance. For Linux instances, the instance
console output displays the exact console output that would normally be displayed on a
physical monitor attached to a computer. For Windows instances, the instance console output
-includes the last three system event log errors.
By default, the console output returns -buffered information that was posted shortly after an instance transition state (start, -stop, reboot, or terminate). This information is available for at least one hour after the -most recent post. Only the most recent 64 KB of console output is available. You can -optionally retrieve the latest serial console output at any time during the instance -lifecycle. This option is supported on instance types that use the Nitro hypervisor. For -more information, see Instance console output in the Amazon EC2 User Guide. +includes the last three system event log errors. For more information, see Instance console +output in the Amazon EC2 User Guide. # Arguments - `instance_id`: The ID of the instance. @@ -24564,11 +24781,12 @@ end modify_capacity_reservation(capacity_reservation_id) modify_capacity_reservation(capacity_reservation_id, params::Dict{String,<:Any}) -Modifies a Capacity Reservation's capacity and the conditions under which it is to be -released. You cannot change a Capacity Reservation's instance type, EBS optimization, -instance store settings, platform, Availability Zone, or instance eligibility. If you need +Modifies a Capacity Reservation's capacity, instance eligibility, and the conditions under +which it is to be released. You can't modify a Capacity Reservation's instance type, EBS +optimization, platform, instance store settings, Availability Zone, or tenancy. If you need to modify any of these attributes, we recommend that you cancel the Capacity Reservation, -and then create a new one with the required attributes. +and then create a new one with the required attributes. For more information, see Modify an +active Capacity Reservation. # Arguments - `capacity_reservation_id`: The ID of the Capacity Reservation. @@ -24594,6 +24812,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys a specified date and time. You must provide an EndDate value if EndDateType is limited. - `"InstanceCount"`: The number of instances for which to reserve capacity. The number of instances can't be increased or decreased by more than 1000 in a single request. +- `"InstanceMatchCriteria"`: The matching criteria (instance eligibility) that you want to + use in the modified Capacity Reservation. If you change the instance eligibility of an + existing Capacity Reservation from targeted to open, any running instances that match the + attributes of the Capacity Reservation, have the CapacityReservationPreference set to open, + and are not yet running in the Capacity Reservation, will automatically use the modified + Capacity Reservation. To modify the instance eligibility, the Capacity Reservation must be + completely idle (zero usage). """ function modify_capacity_reservation( CapacityReservationId; aws_config::AbstractAWSConfig=global_aws_config() @@ -25306,9 +25531,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Virtual Function interface for the instance. There is no way to disable enhanced networking with the Intel 82599 Virtual Function interface at this time. This option is supported only for HVM instances. Specifying this option with a PV instance can make it unreachable. -- `"userData"`: Changes the instance's user data to the specified value. If you are using - an Amazon Web Services SDK or command line tool, base64-encoding is performed for you, and - you can load the text from a file. Otherwise, you must provide base64-encoded text. 
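For the revised userData guidance that follows, a short hedged sketch of supplying base64-encoded user data via ModifyInstanceAttribute; the instance ID is a placeholder, and the nested params shape (a "value" field under "userData") is an assumption drawn from this docstring rather than a verified schema.

    using AWS
    using Base64            # stdlib; provides base64encode
    @service EC2
    script = "#!/bin/bash\nyum update -y\n"
    # The attribute expects base64 text, so encode it explicitly here.
    EC2.modify_instance_attribute(
        "i-0123456789abcdef0",  # hypothetical instance ID
        Dict("userData" => Dict("value" => base64encode(script))),
    )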
+- `"userData"`: Changes the instance's user data to the specified value. User data must be + base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might + be performed for you. For more information, see Work with instance user data. - `"value"`: A new value for the attribute. Use only with the kernel, ramdisk, userData, disableApiTermination, or instanceInitiatedShutdownBehavior attribute. """ @@ -25810,6 +26035,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"EnablePrivateGua"`: Enable this option to use your own GUA ranges as private IPv6 + addresses. This option is disabled by default. - `"RemoveOperatingRegion"`: The operating Regions to remove. - `"Tier"`: IPAM is offered in a Free Tier and an Advanced Tier. For more information about the features available in each tier and the costs associated with the tiers, see Amazon VPC @@ -26625,7 +26852,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DisableLniAtDeviceIndex"`: Specify true to indicate that local network interfaces at the current position should be disabled. - `"EnableDns64"`: Indicates whether DNS queries made to the Amazon-provided DNS Resolver - in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. + in this subnet should return synthetic IPv6 addresses for IPv4-only destinations. You must + first configure a NAT gateway in a public subnet (separate from the subnet containing the + IPv6-only workloads). For example, the subnet containing the NAT gateway should have a + 0.0.0.0/0 route pointing to the internet gateway. For more information, see Configure DNS64 + and NAT64 in the Amazon VPC User Guide. - `"EnableLniAtDeviceIndex"`: Indicates the device position for local network interfaces in this subnet. For example, 1 indicates local network interfaces in this subnet are the secondary network interface (eth1). A local network interface cannot be the primary network @@ -28315,6 +28546,76 @@ function move_byoip_cidr_to_ipam( ) end +""" + move_capacity_reservation_instances(destination_capacity_reservation_id, instance_count, source_capacity_reservation_id) + move_capacity_reservation_instances(destination_capacity_reservation_id, instance_count, source_capacity_reservation_id, params::Dict{String,<:Any}) + +Move available capacity from a source Capacity Reservation to a destination Capacity +Reservation. The source Capacity Reservation and the destination Capacity Reservation must +be active, owned by your Amazon Web Services account, and share the following: Instance +type Platform Availability Zone Tenancy Placement group Capacity Reservation end +time - At specific time or Manually. + +# Arguments +- `destination_capacity_reservation_id`: The ID of the Capacity Reservation that you want + to move capacity into. +- `instance_count`: The number of instances that you want to move from the source Capacity + Reservation. +- `source_capacity_reservation_id`: The ID of the Capacity Reservation from which you want + to move capacity. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"ClientToken"`: Unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. For more information, see Ensure Idempotency. +- `"DryRun"`: Checks whether you have the required permissions for the action, without + actually making the request, and provides an error response. If you have the required + permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +""" +function move_capacity_reservation_instances( + DestinationCapacityReservationId, + InstanceCount, + SourceCapacityReservationId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "MoveCapacityReservationInstances", + Dict{String,Any}( + "DestinationCapacityReservationId" => DestinationCapacityReservationId, + "InstanceCount" => InstanceCount, + "SourceCapacityReservationId" => SourceCapacityReservationId, + "ClientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function move_capacity_reservation_instances( + DestinationCapacityReservationId, + InstanceCount, + SourceCapacityReservationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ec2( + "MoveCapacityReservationInstances", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DestinationCapacityReservationId" => DestinationCapacityReservationId, + "InstanceCount" => InstanceCount, + "SourceCapacityReservationId" => SourceCapacityReservationId, + "ClientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ provision_byoip_cidr(cidr) provision_byoip_cidr(cidr, params::Dict{String,<:Any}) @@ -28455,17 +28756,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys \"Cidr\" is required. This value will be null if you specify \"NetmaskLength\" and will be filled in during the provisioning process. - `"CidrAuthorizationContext"`: A signed document that proves that you are authorized to - bring a specified IP address range to Amazon using BYOIP. This option applies to public - pools only. + bring a specified IP address range to Amazon using BYOIP. This option only applies to IPv4 + and IPv6 pools in the public scope. - `"ClientToken"`: A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see Ensuring idempotency. - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"IpamExternalResourceVerificationTokenId"`: Verification token ID. This option only + applies to IPv4 and IPv6 pools in the public scope. - `"NetmaskLength"`: The netmask length of the CIDR you'd like to provision to a pool. Can be used for provisioning Amazon-provided IPv6 CIDRs to top-level pools and for provisioning CIDRs to pools with source pools. Cannot be used to provision BYOIP CIDRs to top-level pools. Either \"NetmaskLength\" or \"Cidr\" is required. +- `"VerificationMethod"`: The method for verifying control of a public IP address range. + Defaults to remarks-x509 if not specified. This option only applies to IPv4 and IPv6 pools + in the public scope. """ function provision_ipam_pool_cidr( IpamPoolId; aws_config::AbstractAWSConfig=global_aws_config() @@ -28508,7 +28814,7 @@ in the Amazon VPC IPAM User Guide. 
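A hedged usage sketch for the move_capacity_reservation_instances wrapper defined above. The reservation IDs are placeholders; per the docstring, both reservations must be active, owned by the same account, and share instance type, platform, Availability Zone, tenancy, placement group, and end time.

    using AWS
    @service EC2
    # Move five unused instance slots between two hypothetical reservations.
    # The wrapper injects a ClientToken (uuid4) for idempotency automatically.
    EC2.move_capacity_reservation_instances(
        "cr-0123456789abcdef0",  # DestinationCapacityReservationId
        5,                       # InstanceCount
        "cr-0fedcba9876543210",  # SourceCapacityReservationId
    )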
# Arguments - `ipam_pool_id`: The ID of the IPAM pool you would like to use to allocate this CIDR. - `netmask_length`: The netmask length of the CIDR you would like to allocate to the public - IPv4 pool. + IPv4 pool. The least specific netmask length you can define is 24. - `pool_id`: The ID of the public IPv4 pool you would like to use for this CIDR. # Optional Parameters @@ -28516,6 +28822,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DryRun"`: A check for whether you have the required permissions for the action without actually making the request and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation. +- `"NetworkBorderGroup"`: The Availability Zone (AZ) or Local Zone (LZ) network border + group that the resource that the IP address is assigned to is in. Defaults to an AZ network + border group. For more information on available Local Zones, see Local Zone availability in + the Amazon EC2 User Guide. """ function provision_public_ipv4_pool_cidr( IpamPoolId, NetmaskLength, PoolId; aws_config::AbstractAWSConfig=global_aws_config() @@ -28837,33 +29147,36 @@ end Registers an AMI. When you're creating an instance-store backed AMI, registering the AMI is the final step in the creation process. For more information about creating AMIs, see -Create your own AMI in the Amazon Elastic Compute Cloud User Guide. For Amazon EBS-backed -instances, CreateImage creates and registers the AMI in a single request, so you don't have -to register the AMI yourself. We recommend that you always use CreateImage unless you have -a specific reason to use RegisterImage. If needed, you can deregister an AMI at any time. -Any modifications you make to an AMI backed by an instance store volume invalidates its -registration. If you make changes to an image, deregister the previous image and register -the new image. Register a snapshot of a root device volume You can use RegisterImage to -create an Amazon EBS-backed Linux AMI from a snapshot of a root device volume. You specify -the snapshot using a block device mapping. You can't set the encryption state of the volume -using the block device mapping. If the snapshot is encrypted, or encryption by default is -enabled, the root volume of an instance launched from the AMI is encrypted. For more -information, see Create a Linux AMI from a snapshot and Use encryption with Amazon -EBS-backed AMIs in the Amazon Elastic Compute Cloud User Guide. Amazon Web Services -Marketplace product codes If any snapshots have Amazon Web Services Marketplace product -codes, they are copied to the new AMI. Windows and some Linux distributions, such as Red -Hat Enterprise Linux (RHEL) and SUSE Linux Enterprise Server (SLES), use the Amazon EC2 -billing product code associated with an AMI to verify the subscription status for package -updates. To create a new AMI for operating systems that require a billing product code, -instead of registering the AMI, do the following to preserve the billing product code -association: Launch an instance from an existing AMI with that billing product code. -Customize the instance. Create an AMI from the instance using CreateImage. If you -purchase a Reserved Instance to apply to an On-Demand Instance that was launched from an -AMI with a billing product code, make sure that the Reserved Instance has the matching -billing product code. 
If you purchase a Reserved Instance without the matching billing -product code, the Reserved Instance will not be applied to the On-Demand Instance. For -information about how to obtain the platform details and billing information of an AMI, see -Understand AMI billing information in the Amazon EC2 User Guide. +Create an AMI from a snapshot and Create an instance-store backed AMI in the Amazon EC2 +User Guide. For Amazon EBS-backed instances, CreateImage creates and registers the AMI in +a single request, so you don't have to register the AMI yourself. We recommend that you +always use CreateImage unless you have a specific reason to use RegisterImage. If needed, +you can deregister an AMI at any time. Any modifications you make to an AMI backed by an +instance store volume invalidates its registration. If you make changes to an image, +deregister the previous image and register the new image. Register a snapshot of a root +device volume You can use RegisterImage to create an Amazon EBS-backed Linux AMI from a +snapshot of a root device volume. You specify the snapshot using a block device mapping. +You can't set the encryption state of the volume using the block device mapping. If the +snapshot is encrypted, or encryption by default is enabled, the root volume of an instance +launched from the AMI is encrypted. For more information, see Create an AMI from a snapshot +and Use encryption with Amazon EBS-backed AMIs in the Amazon EC2 User Guide. Amazon Web +Services Marketplace product codes If any snapshots have Amazon Web Services Marketplace +product codes, they are copied to the new AMI. In most cases, AMIs for Windows, RedHat, +SUSE, and SQL Server require correct licensing information to be present on the AMI. For +more information, see Understand AMI billing information in the Amazon EC2 User Guide. When +creating an AMI from a snapshot, the RegisterImage operation derives the correct billing +information from the snapshot's metadata, but this requires the appropriate metadata to be +present. To verify if the correct billing information was applied, check the +PlatformDetails field on the new AMI. If the field is empty or doesn't match the expected +operating system code (for example, Windows, RedHat, SUSE, or SQL), the AMI creation was +unsuccessful, and you should discard the AMI and instead create the AMI from an instance +using CreateImage. For more information, see Create an AMI from an instance in the Amazon +EC2 User Guide. If you purchase a Reserved Instance to apply to an On-Demand Instance that +was launched from an AMI with a billing product code, make sure that the Reserved Instance +has the matching billing product code. If you purchase a Reserved Instance without the +matching billing product code, the Reserved Instance will not be applied to the On-Demand +Instance. For information about how to obtain the platform details and billing information +of an AMI, see Understand AMI billing information in the Amazon EC2 User Guide. # Arguments - `name`: A name for your AMI. Constraints: 3-128 alphanumeric characters, parentheses @@ -30951,11 +31264,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys launch. You can specify tags for the following resources only: Instances Volumes Spot Instance requests Network interfaces To tag a resource after it has been created, see CreateTags. -- `"UserData"`: The user data script to make available to the instance. 
For more - information, see Run commands on your Amazon EC2 instance at launch in the Amazon EC2 User - Guide. If you are using a command line tool, base64-encoding is performed for you, and you - can load the text from a file. Otherwise, you must provide base64-encoded text. User data - is limited to 16 KB. +- `"UserData"`: The user data to make available to the instance. User data must be + base64-encoded. Depending on the tool or SDK that you're using, the base64-encoding might + be performed for you. For more information, see Work with instance user data. - `"additionalInfo"`: Reserved. - `"clientToken"`: Unique, case-sensitive identifier you provide to ensure the idempotency of the request. If you do not specify a client token, a randomly generated token is used diff --git a/src/services/ecr.jl b/src/services/ecr.jl index 1b94d9de1c..90651f3cf7 100644 --- a/src/services/ecr.jl +++ b/src/services/ecr.jl @@ -286,7 +286,6 @@ see Using pull through cache rules in the Amazon Elastic Container Registry User Hub (docker-hub) - registry-1.docker.io Quay (quay) - quay.io Kubernetes (k8s) - registry.k8s.io GitHub Container Registry (github-container-registry) - ghcr.io Microsoft Azure Container Registry (azure-container-registry) - <custom>.azurecr.io - GitLab Container Registry (gitlab-container-registry) - registry.gitlab.com # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -392,6 +391,83 @@ function create_repository( ) end +""" + create_repository_creation_template(applied_for, prefix) + create_repository_creation_template(applied_for, prefix, params::Dict{String,<:Any}) + +Creates a repository creation template. This template is used to define the settings for +repositories created by Amazon ECR on your behalf. For example, repositories created +through pull through cache actions. For more information, see Private repository creation +templates in the Amazon Elastic Container Registry User Guide. + +# Arguments +- `applied_for`: A list of enumerable strings representing the Amazon ECR repository + creation scenarios that this template will apply towards. The two supported scenarios are + PULL_THROUGH_CACHE and REPLICATION +- `prefix`: The repository namespace prefix to associate with the template. All + repositories created using this namespace prefix will have the settings defined in this + template applied. For example, a prefix of prod would apply to all repositories beginning + with prod/. Similarly, a prefix of prod/team would apply to all repositories beginning with + prod/team/. To apply a template to all repositories in your registry that don't have an + associated creation template, you can use ROOT as the prefix. There is always an assumed / + applied to the end of the prefix. If you specify ecr-public as the prefix, Amazon ECR + treats that as ecr-public/. When using a pull through cache rule, the repository prefix you + specify during rule creation is what you should specify as your repository creation + template prefix as well. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"customRoleArn"`: The ARN of the role to be assumed by Amazon ECR. This role must be in + the same account as the registry that you are configuring. Amazon ECR will assume your + supplied role when the customRoleArn is specified. When this field isn't specified, Amazon + ECR will use the service-linked role for the repository creation template. 
+- `"description"`: A description for the repository creation template. +- `"encryptionConfiguration"`: The encryption configuration to use for repositories created + using the template. +- `"imageTagMutability"`: The tag mutability setting for the repository. If this parameter + is omitted, the default setting of MUTABLE will be used which will allow image tags to be + overwritten. If IMMUTABLE is specified, all image tags within the repository will be + immutable which will prevent them from being overwritten. +- `"lifecyclePolicy"`: The lifecycle policy to use for repositories created using the + template. +- `"repositoryPolicy"`: The repository policy to apply to repositories created using the + template. A repository policy is a permissions policy associated with a repository to + control access permissions. +- `"resourceTags"`: The metadata to apply to the repository to help you categorize and + organize. Each tag consists of a key and an optional value, both of which you define. Tag + keys can have a maximum character length of 128 characters, and tag values can have a + maximum length of 256 characters. +""" +function create_repository_creation_template( + appliedFor, prefix; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "CreateRepositoryCreationTemplate", + Dict{String,Any}("appliedFor" => appliedFor, "prefix" => prefix); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_repository_creation_template( + appliedFor, + prefix, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ecr( + "CreateRepositoryCreationTemplate", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("appliedFor" => appliedFor, "prefix" => prefix), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_lifecycle_policy(repository_name) delete_lifecycle_policy(repository_name, params::Dict{String,<:Any}) @@ -543,6 +619,38 @@ function delete_repository( ) end +""" + delete_repository_creation_template(prefix) + delete_repository_creation_template(prefix, params::Dict{String,<:Any}) + +Deletes a repository creation template. + +# Arguments +- `prefix`: The repository namespace prefix associated with the repository creation + template. + +""" +function delete_repository_creation_template( + prefix; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "DeleteRepositoryCreationTemplate", + Dict{String,Any}("prefix" => prefix); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_repository_creation_template( + prefix, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "DeleteRepositoryCreationTemplate", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("prefix" => prefix), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_repository_policy(repository_name) delete_repository_policy(repository_name, params::Dict{String,<:Any}) @@ -852,6 +960,82 @@ function describe_repositories( ) end +""" + describe_repository_creation_templates() + describe_repository_creation_templates(params::Dict{String,<:Any}) + +Returns details about the repository creation templates in a registry. The prefixes request +parameter can be used to return the details for a specific repository creation template. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are:
+- `"maxResults"`: The maximum number of repository results returned by
+ DescribeRepositoryCreationTemplatesRequest in paginated output. When this parameter is
+ used, DescribeRepositoryCreationTemplatesRequest only returns maxResults results in a
+ single page along with a nextToken response element. The remaining results of the initial
+ request can be seen by sending another DescribeRepositoryCreationTemplatesRequest request
+ with the returned nextToken value. This value can be between 1 and 1000. If this parameter
+ is not used, then DescribeRepositoryCreationTemplatesRequest returns up to 100 results and
+ a nextToken value, if applicable.
+- `"nextToken"`: The nextToken value returned from a previous paginated
+ DescribeRepositoryCreationTemplates request where maxResults was used and the results
+ exceeded the value of that parameter. Pagination continues from the end of the previous
+ results that returned the nextToken value. This value is null when there are no more
+ results to return. This token should be treated as an opaque identifier that is only used
+ to retrieve the next items in a list and not for other programmatic purposes.
+- `"prefixes"`: The repository namespace prefixes associated with the repository creation
+ templates to describe. If this value is not specified, all repository creation templates
+ are returned.
+"""
+function describe_repository_creation_templates(;
+    aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return ecr(
+        "DescribeRepositoryCreationTemplates";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function describe_repository_creation_templates(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return ecr(
+        "DescribeRepositoryCreationTemplates",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_account_setting(name)
+    get_account_setting(name, params::Dict{String,<:Any})
+
+Retrieves the basic scan type version name.
+
+# Arguments
+- `name`: Basic scan type version name.
+
+"""
+function get_account_setting(name; aws_config::AbstractAWSConfig=global_aws_config())
+    return ecr(
+        "GetAccountSetting",
+        Dict{String,Any}("name" => name);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_account_setting(
+    name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return ecr(
+        "GetAccountSetting",
+        Dict{String,Any}(mergewith(_merge, Dict{String,Any}("name" => name), params));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     get_authorization_token()
     get_authorization_token(params::Dict{String,<:Any})
@@ -1256,6 +1440,43 @@ function list_tags_for_resource(
     )
 end

+"""
+    put_account_setting(name, value)
+    put_account_setting(name, value, params::Dict{String,<:Any})
+
+Allows you to change the basic scan type version by setting the name parameter to either
+CLAIR or AWS_NATIVE.
+
+# Arguments
+- `name`: Basic scan type version name.
+- `value`: Setting value that determines what basic scan type is being used: AWS_NATIVE or
+ CLAIR.
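A hedged sketch tying together the new ECR wrappers introduced in this file. The call shapes mirror the definitions in this diff, but the setting name string and the template values are illustrative assumptions, not verified constants.

    using AWS
    @service ECR
    # Read, then switch, the account's basic scan type version.
    # "BASIC_SCAN_TYPE_VERSION" is an assumed setting name for illustration.
    ECR.get_account_setting("BASIC_SCAN_TYPE_VERSION")
    ECR.put_account_setting("BASIC_SCAN_TYPE_VERSION", "AWS_NATIVE")
    # Create a creation template for pull-through-cache repositories under prod/.
    ECR.create_repository_creation_template(
        ["PULL_THROUGH_CACHE"],                     # appliedFor
        "prod",                                     # prefix
        Dict("imageTagMutability" => "IMMUTABLE"),  # optional template settings
    )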
+ +""" +function put_account_setting(name, value; aws_config::AbstractAWSConfig=global_aws_config()) + return ecr( + "PutAccountSetting", + Dict{String,Any}("name" => name, "value" => value); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function put_account_setting( + name, + value, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ecr( + "PutAccountSetting", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("name" => name, "value" => value), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ put_image(image_manifest, repository_name) put_image(image_manifest, repository_name, params::Dict{String,<:Any}) @@ -1569,9 +1790,10 @@ configuration for a repository can be retrieved with the DescribeRegistry API ac first time the PutReplicationConfiguration API is called, a service-linked IAM role is created in your account for the replication process. For more information, see Using service-linked roles for Amazon ECR in the Amazon Elastic Container Registry User Guide. -When configuring cross-account replication, the destination account must grant the source -account permission to replicate. This permission is controlled using a registry permissions -policy. For more information, see PutRegistryPolicy. +For more information on the custom role for replication, see Creating an IAM role for +replication. When configuring cross-account replication, the destination account must +grant the source account permission to replicate. This permission is controlled using a +registry permissions policy. For more information, see PutRegistryPolicy. # Arguments - `replication_configuration`: An object representing the replication configuration for a @@ -1889,6 +2111,66 @@ function update_pull_through_cache_rule( ) end +""" + update_repository_creation_template(prefix) + update_repository_creation_template(prefix, params::Dict{String,<:Any}) + +Updates an existing repository creation template. + +# Arguments +- `prefix`: The repository namespace prefix that matches an existing repository creation + template in the registry. All repositories created using this namespace prefix will have + the settings defined in this template applied. For example, a prefix of prod would apply to + all repositories beginning with prod/. This includes a repository named prod/team1 as well + as a repository named prod/repository1. To apply a template to all repositories in your + registry that don't have an associated creation template, you can use ROOT as the prefix. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appliedFor"`: Updates the list of enumerable strings representing the Amazon ECR + repository creation scenarios that this template will apply towards. The two supported + scenarios are PULL_THROUGH_CACHE and REPLICATION +- `"customRoleArn"`: The ARN of the role to be assumed by Amazon ECR. This role must be in + the same account as the registry that you are configuring. Amazon ECR will assume your + supplied role when the customRoleArn is specified. When this field isn't specified, Amazon + ECR will use the service-linked role for the repository creation template. +- `"description"`: A description for the repository creation template. +- `"encryptionConfiguration"`: +- `"imageTagMutability"`: Updates the tag mutability setting for the repository. 
If this + parameter is omitted, the default setting of MUTABLE will be used which will allow image + tags to be overwritten. If IMMUTABLE is specified, all image tags within the repository + will be immutable which will prevent them from being overwritten. +- `"lifecyclePolicy"`: Updates the lifecycle policy associated with the specified + repository creation template. +- `"repositoryPolicy"`: Updates the repository policy created using the template. A + repository policy is a permissions policy associated with a repository to control access + permissions. +- `"resourceTags"`: The metadata to apply to the repository to help you categorize and + organize. Each tag consists of a key and an optional value, both of which you define. Tag + keys can have a maximum character length of 128 characters, and tag values can have a + maximum length of 256 characters. +""" +function update_repository_creation_template( + prefix; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "UpdateRepositoryCreationTemplate", + Dict{String,Any}("prefix" => prefix); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_repository_creation_template( + prefix, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ecr( + "UpdateRepositoryCreationTemplate", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("prefix" => prefix), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ upload_layer_part(layer_part_blob, part_first_byte, part_last_byte, repository_name, upload_id) upload_layer_part(layer_part_blob, part_first_byte, part_last_byte, repository_name, upload_id, params::Dict{String,<:Any}) diff --git a/src/services/ecs.jl b/src/services/ecs.jl index e34865f7c7..2e6b3b4dae 100644 --- a/src/services/ecs.jl +++ b/src/services/ecs.jl @@ -77,12 +77,12 @@ end Creates a new Amazon ECS cluster. By default, your account receives a default cluster when you launch your first container instance. However, you can create your own cluster with a -unique name with the CreateCluster action. When you call the CreateCluster API operation, -Amazon ECS attempts to create the Amazon ECS service-linked role for your account. This is -so that it can manage required resources in other Amazon Web Services services on your -behalf. However, if the user that makes the call doesn't have permissions to create the -service-linked role, it isn't created. For more information, see Using service-linked roles -for Amazon ECS in the Amazon Elastic Container Service Developer Guide. +unique name. When you call the CreateCluster API operation, Amazon ECS attempts to create +the Amazon ECS service-linked role for your account. This is so that it can manage required +resources in other Amazon Web Services services on your behalf. However, if the user that +makes the call doesn't have permissions to create the service-linked role, it isn't +created. For more information, see Using service-linked roles for Amazon ECS in the Amazon +Elastic Container Service Developer Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -152,8 +152,8 @@ end Runs and maintains your desired number of tasks from a specified task definition. If the number of tasks running in a service drops below the desiredCount, Amazon ECS runs another -copy of the task in the specified cluster. To update an existing service, see the -UpdateService action. 
On March 21, 2024, a change was made to resolve the task definition +copy of the task in the specified cluster. To update an existing service, use +UpdateService. On March 21, 2024, a change was made to resolve the task definition revision before authorization. When a task definition revision is not specified, authorization will occur using the latest revision of a task definition. In addition to maintaining the desired count of tasks in your service, you can optionally run your service @@ -180,8 +180,8 @@ Auto Scaling policies. For more information, see Service scheduler concepts in t Elastic Container Service Developer Guide. You can optionally specify a deployment configuration for your service. The deployment is initiated by changing properties. For example, the deployment might be initiated by the task definition or by your desired count -of a service. This is done with an UpdateService operation. The default value for a replica -service for minimumHealthyPercent is 100%. The default value for a daemon service for +of a service. You can use UpdateService. The default value for a replica service for +minimumHealthyPercent is 100%. The default value for a daemon service for minimumHealthyPercent is 0%. If a service uses the ECS deployment controller, the minimum healthy percent represents a lower limit on the number of tasks in a service that must remain in the RUNNING state during a deployment. Specifically, it represents it as a @@ -212,17 +212,17 @@ the Fargate launch type, the minimum healthy percent and maximum percent values used. This is the case even if they're currently visible when describing your service. When creating a service that uses the EXTERNAL deployment controller, you can specify only parameters that aren't controlled at the task set level. The only required parameter is the -service name. You control your services using the CreateTaskSet operation. For more -information, see Amazon ECS deployment types in the Amazon Elastic Container Service -Developer Guide. When the service scheduler launches new tasks, it determines task -placement. For information about task placement and task placement strategies, see Amazon -ECS task placement in the Amazon Elastic Container Service Developer Guide Starting April -15, 2023, Amazon Web Services will not onboard new customers to Amazon Elastic Inference -(EI), and will help current customers migrate their workloads to options that offer better -price and performance. After April 15, 2023, new customers will not be able to launch -instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. -However, customers who have used Amazon EI at least once during the past 30-day period are -considered current customers and will be able to continue using the service. +service name. You control your services using the CreateTaskSet. For more information, see +Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide. When +the service scheduler launches new tasks, it determines task placement. For information +about task placement and task placement strategies, see Amazon ECS task placement in the +Amazon Elastic Container Service Developer Guide Starting April 15, 2023, Amazon Web +Services will not onboard new customers to Amazon Elastic Inference (EI), and will help +current customers migrate their workloads to options that offer better price and +performance. 
After April 15, 2023, new customers will not be able to launch instances with
+Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers
+who have used Amazon EI at least once during the past 30-day period are considered current
+customers and will be able to continue using the service.

# Arguments
- `service_name`: The name of your service. Up to 255 letters (uppercase and lowercase),
@@ -423,9 +423,8 @@ the EXTERNAL deployment controller type. For more information, see Amazon ECS de
types in the Amazon Elastic Container Service Developer Guide. On March 21, 2024, a change
was made to resolve the task definition revision before authorization. When a task
definition revision is not specified, authorization will occur using the latest revision of
-a task definition. For information about the maximum number of task sets and otther
-quotas, see Amazon ECS service quotas in the Amazon Elastic Container Service Developer
-Guide.
+a task definition. For information about the maximum number of task sets and other quotas,
+see Amazon ECS service quotas in the Amazon Elastic Container Service Developer Guide.

# Arguments
- `cluster`: The short name or full Amazon Resource Name (ARN) of the cluster that hosts
@@ -446,11 +445,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 parameter must be omitted. If no capacityProviderStrategy or launchType is specified, the
 defaultCapacityProviderStrategy for the cluster is used. If specifying a capacity provider
 that uses an Auto Scaling group, the capacity provider must already be created. New
- capacity providers can be created with the CreateCapacityProvider API operation. To use a
- Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity providers.
- The Fargate capacity providers are available to all accounts and only need to be associated
- with a cluster to be used. The PutClusterCapacityProviders API operation is used to update
- the list of available capacity providers for a cluster after the cluster is created.
+ capacity providers can be created with the CreateCapacityProvider API operation. To
+ use a Fargate capacity provider, specify either the FARGATE or FARGATE_SPOT capacity
+ providers. The Fargate capacity providers are available to all accounts and only need to be
+ associated with a cluster to be used. The PutClusterCapacityProviders API operation is used
+ to update the list of available capacity providers for a cluster after the cluster is
+ created.
- `"clientToken"`: An identifier that you provide to ensure the idempotency of the request.
 It must be unique and is case sensitive. Up to 36 ASCII characters in the range of 33-126
 (inclusive) are allowed.
@@ -611,15 +611,15 @@ end
Deletes the specified capacity provider. The FARGATE and FARGATE_SPOT capacity providers
are reserved and can't be deleted. You can disassociate them from a cluster using either
-the PutClusterCapacityProviders API or by deleting the cluster. Prior to a capacity
-provider being deleted, the capacity provider must be removed from the capacity provider
-strategy from all services. The UpdateService API can be used to remove a capacity provider
-from a service's capacity provider strategy. When updating a service, the
-forceNewDeployment option can be used to ensure that any tasks using the Amazon EC2
-instance capacity provided by the capacity provider are transitioned to use the capacity
-from the remaining capacity providers.
Only capacity providers that aren't associated with
-a cluster can be deleted. To remove a capacity provider from a cluster, you can either use
-PutClusterCapacityProviders or delete the cluster.
+PutClusterCapacityProviders or by deleting the cluster. Prior to a capacity provider
+being deleted, the capacity provider must be removed from the capacity provider strategy
+from all services. The UpdateService API can be used to remove a capacity provider from a
+service's capacity provider strategy. When updating a service, the forceNewDeployment
+option can be used to ensure that any tasks using the Amazon EC2 instance capacity provided
+by the capacity provider are transitioned to use the capacity from the remaining capacity
+providers. Only capacity providers that aren't associated with a cluster can be deleted. To
+remove a capacity provider from a cluster, you can either use PutClusterCapacityProviders
+or delete the cluster.

# Arguments
- `capacity_provider`: The short name or full Amazon Resource Name (ARN) of the capacity
@@ -2233,12 +2233,10 @@ containers can then use the latest versions of the CLI or SDKs to make API reque
Amazon Web Services services that are specified in the policy that's associated with the
role. For more information, see IAM Roles for Tasks in the Amazon Elastic Container Service
Developer Guide. You can specify a Docker networking mode for the containers in your task
-definition with the networkMode parameter. The available network modes correspond to those
-described in Network settings in the Docker run reference. If you specify the awsvpc
-network mode, the task is allocated an elastic network interface, and you must specify a
-NetworkConfiguration when you create a service or run a task with the task definition. For
-more information, see Task Networking in the Amazon Elastic Container Service Developer
-Guide.
+definition with the networkMode parameter. If you specify the awsvpc network mode, the task
+is allocated an elastic network interface, and you must specify a NetworkConfiguration when
+you create a service or run a task with the task definition. For more information, see Task
+Networking in the Amazon Elastic Container Service Developer Guide.

# Arguments
- `container_definitions`: A list of container definitions in JSON format that describe the
@@ -2278,9 +2276,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 platform version 1.0.0 or later.
- `"executionRoleArn"`: The Amazon Resource Name (ARN) of the task execution role that
 grants the Amazon ECS container agent permission to make Amazon Web Services API calls on
- your behalf. The task execution IAM role is required depending on the requirements of your
- task. For more information, see Amazon ECS task execution IAM role in the Amazon Elastic
- Container Service Developer Guide.
+ your behalf. For information about the required IAM roles for Amazon ECS, see IAM roles for
+ Amazon ECS in the Amazon Elastic Container Service Developer Guide.
- `"inferenceAccelerators"`: The Elastic Inference accelerators to use for the containers
 in the task.
- `"ipcMode"`: The IPC resource namespace to use for the containers in the task. The valid
 values are host or task. If host is specified, then all containers within the tasks that
 specified the host IPC mode on the same container instance share the same IPC resources. If
 task is specified, all containers within the specified task share the same IPC resources.
 If none is specified, then IPC resources within the containers of a task are private and
 not shared with other containers in a task or on the container instance. If no value is
 specified, then the IPC resource namespace
- sharing depends on the Docker daemon setting on the container instance. For more
- information, see IPC settings in the Docker run reference. If the host IPC mode is used, be
- aware that there is a heightened risk of undesired IPC namespace expose. For more
- information, see Docker security. If you are setting namespaced kernel parameters using
- systemControls for the containers in the task, the following will apply to your IPC
- resource namespace. For more information, see System Controls in the Amazon Elastic
- Container Service Developer Guide. For tasks that use the host IPC mode, IPC namespace
- related systemControls are not supported. For tasks that use the task IPC mode, IPC
- namespace related systemControls will apply to all containers within a task. This
- parameter is not supported for Windows containers or tasks run on Fargate.
+ sharing depends on the Docker daemon setting on the container instance. If the host IPC
+ mode is used, be aware that there is a heightened risk of undesired IPC namespace exposure.
+ If you are setting namespaced kernel parameters using systemControls for the containers in
+ the task, the following will apply to your IPC resource namespace. For more information,
+ see System Controls in the Amazon Elastic Container Service Developer Guide. For tasks
+ that use the host IPC mode, IPC namespace related systemControls are not supported. For
+ tasks that use the task IPC mode, IPC namespace related systemControls will apply to all
+ containers within a task. This parameter is not supported for Windows containers or
+ tasks run on Fargate.
- `"memory"`: The amount of memory (in MiB) used by the task. It can be expressed as an
 integer using MiB (for example, 1024) or as a string using GB (for example, 1GB or 1 GB) in
 a task definition. String values are converted to an integer indicating the MiB when the
@@ -2337,7 +2333,6 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 definition. For more information, see Task Networking in the Amazon Elastic Container
 Service Developer Guide. If the network mode is host, you cannot run multiple
 instantiations of the same task on a single container instance when port mappings are used.
- For more information, see Network settings in the Docker run reference.
- `"pidMode"`: The process namespace to use for the containers in the task. The valid
 values are host or task. On Fargate for Linux containers, the only valid value is task. For
 example, monitoring sidecars might need pidMode to access information about other
 containers running in the same task. If host is specified, all containers within the tasks
 that specified the host PID mode on the same container instance share the same process
 namespace with the host Amazon EC2 instance. If task is specified, all containers within
 the specified task share the same process namespace. If no value is specified, the default
- is a private namespace for each container. For more information, see PID settings in the
- Docker run reference. If the host PID mode is used, there's a heightened risk of undesired
- process namespace exposure. For more information, see Docker security. This parameter is
- not supported for Windows containers. This parameter is only supported for tasks that are
- hosted on Fargate if the tasks are using platform version 1.4.0 or later (Linux). This
- isn't supported for Windows containers on Fargate.
+ is a private namespace for each container.
If the host PID mode is used, there's a
+ heightened risk of undesired process namespace exposure. This parameter is not supported
+ for Windows containers. This parameter is only supported for tasks that are hosted on
+ Fargate if the tasks are using platform version 1.4.0 or later (Linux). This isn't
+ supported for Windows containers on Fargate.
- `"placementConstraints"`: An array of placement constraint objects to use for the task.
 You can specify a maximum of 10 constraints for each task. This limit includes constraints
 in the task definition and those specified at runtime.
@@ -2526,18 +2520,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 Container Service Developer Guide.
- `"propagateTags"`: Specifies whether to propagate the tags from the task definition to
 the task. If no value is specified, the tags aren't propagated. Tags can only be propagated
- to the task during task creation. To add tags to a task after task creation, use the
- TagResource API action. An error will be received if you specify the SERVICE option when
- running a task.
+ to the task during task creation. To add tags to a task after task creation, use
+ the TagResource API action. An error will be received if you specify the SERVICE option
+ when running a task.
- `"referenceId"`: The reference ID to use for the task. The reference ID can have a
 maximum length of 1024 characters.
- `"startedBy"`: An optional tag specified when a task is started. For example, if you
 automatically trigger a task to run a batch process job, you could apply a unique
 identifier for that job to your task with the startedBy parameter. You can then identify
 which tasks belong to that job by filtering the results of a ListTasks call with the
- startedBy value. Up to 128 letters (uppercase and lowercase), numbers, hyphens (-), and
- underscores (_) are allowed. If a task is started by an Amazon ECS service, then the
- startedBy parameter contains the deployment ID of the service that starts it.
+ startedBy value. Up to 128 letters (uppercase and lowercase), numbers, hyphens (-), forward
+ slash (/), and underscores (_) are allowed. If a task is started by an Amazon ECS service,
+ then the startedBy parameter contains the deployment ID of the service that starts it.
- `"tags"`: The metadata that you apply to the task to help you categorize and organize
 them. Each tag consists of a key and an optional value, both of which you define. The
 following basic restrictions apply to tags: Maximum number of tags per resource - 50
@@ -2600,8 +2594,8 @@ current customers migrate their workloads to options that offer better price and
 performance. After April 15, 2023, new customers will not be able to launch instances with
 Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers
 who have used Amazon EI at least once during the past 30-day period are considered current
-customers and will be able to continue using the service. Alternatively, you can use
-RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon
+customers and will be able to continue using the service. Alternatively, you can
+use RunTask to place tasks for you. For more information, see Scheduling Tasks in the Amazon
 Elastic Container Service Developer Guide. You can attach Amazon EBS volumes to Amazon ECS
 tasks by configuring the volume when creating or updating a service. For more information,
 see Amazon EBS volumes in the Amazon Elastic Container Service Developer Guide.
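Because the passage above steers readers from StartTask toward RunTask, a hedged RunTask sketch may help here. The cluster, task definition family, and subnet ID are placeholders, and the optional-parameter keys follow this file's Dict convention rather than a verified schema.

    using AWS
    @service ECS
    # Run one Fargate task from the latest ACTIVE revision of a hypothetical
    # task definition family, attached to a placeholder subnet.
    ECS.run_task(
        "my-task-family",
        Dict(
            "cluster" => "my-cluster",
            "launchType" => "FARGATE",
            "count" => 1,
            "networkConfiguration" => Dict(
                "awsvpcConfiguration" => Dict(
                    "subnets" => ["subnet-0123456789abcdef0"],
                ),
            ),
        ),
    )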
@@ -2641,9 +2635,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 automatically trigger a task to run a batch process job, you could apply a unique
 identifier for that job to your task with the startedBy parameter. You can then identify
 which tasks belong to that job by filtering the results of a ListTasks call with the
- startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens (-), and
- underscores (_) are allowed. If a task is started by an Amazon ECS service, the startedBy
- parameter contains the deployment ID of the service that starts it.
+ startedBy value. Up to 36 letters (uppercase and lowercase), numbers, hyphens (-), forward
+ slash (/), and underscores (_) are allowed. If a task is started by an Amazon ECS service,
+ the startedBy parameter contains the deployment ID of the service that starts it.
- `"tags"`: The metadata that you apply to the task to help you categorize and organize
 them. Each tag consists of a key and an optional value, both of which you define. The
 following basic restrictions apply to tags: Maximum number of tags per resource - 50
@@ -2701,9 +2695,9 @@ end
 stop_task(task)
 stop_task(task, params::Dict{String,<:Any})

-Stops a running task. Any tags associated with the task will be deleted. When StopTask is
-called on a task, the equivalent of docker stop is issued to the containers running in the
-task. This results in a SIGTERM value and a default 30-second timeout, after which the
+Stops a running task. Any tags associated with the task will be deleted. When you call
+StopTask on a task, the equivalent of docker stop is issued to the containers running in
+the task. This results in a SIGTERM value and a default 30-second timeout, after which the
 SIGKILL value is sent and the containers are forcibly stopped. If the container handles the
 SIGTERM value gracefully and exits within 30 seconds from receiving it, no SIGKILL value is
 sent. For Windows containers, POSIX signals do not work and runtime stops the container by
@@ -2722,7 +2716,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 the task to stop. If you do not specify a cluster, the default cluster is assumed.
- `"reason"`: An optional message specified when a task is stopped. For example, if you're
 using a custom scheduler, you can use this parameter to specify the reason for stopping the
- task here, and the message appears in subsequent DescribeTasks API operations on this task.
+ task here, and the message appears in subsequent DescribeTasks API operations on this
+ task.
"""
function stop_task(task; aws_config::AbstractAWSConfig=global_aws_config())
 return ecs(
@@ -3326,11 +3321,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 The PutClusterCapacityProviders API is used to associate a capacity provider with a
 cluster. Only capacity providers with an ACTIVE or UPDATING status can be used. If
 specifying a capacity provider that uses an Auto Scaling group, the capacity provider must
- already be created.
New capacity providers can be created with the
+ CreateCapacityProvider API operation. To use a Fargate capacity provider, specify
+ either the FARGATE or FARGATE_SPOT capacity providers. The Fargate capacity providers are
+ available to all accounts and only need to be associated with a cluster to be used. The
+ PutClusterCapacityProviders API operation is used to update the list of available capacity
 providers for a cluster after the cluster is created.
- `"cluster"`: The short name or full Amazon Resource Name (ARN) of the cluster that your
 service runs on. If you do not specify a cluster, the default cluster is assumed.
diff --git a/src/services/eks.jl b/src/services/eks.jl
index 0c0fe28615..43f0bbd9cb 100644
--- a/src/services/eks.jl
+++ b/src/services/eks.jl
@@ -394,6 +394,10 @@ Launching Amazon EKS nodes in the Amazon EKS User Guide.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"accessConfig"`: The access configuration for the cluster.
+- `"bootstrapSelfManagedAddons"`: If you set this value to False when creating a cluster,
+ the default networking add-ons will not be installed. The default networking add-ons include
+ vpc-cni, coredns, and kube-proxy. Use this option when you plan to install third-party
+ alternative add-ons or self-manage the default networking add-ons.
- `"clientRequestToken"`: A unique, case-sensitive identifier that you provide to ensure
 the idempotency of the request.
- `"encryptionConfig"`: The encryption configuration for the cluster.
@@ -411,6 +415,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"tags"`: Metadata that assists with categorization and organization. Each tag consists
 of a key and an optional value. You define both. Tags don't propagate to any other cluster
 or Amazon Web Services resources.
+- `"upgradePolicy"`: New clusters, by default, have extended support enabled. You can
+ disable extended support when creating a cluster by setting this value to STANDARD.
- `"version"`: The desired Kubernetes version for your cluster. If you don't specify a
 value here, the default version available in Amazon EKS is used. The default version might
 not be the latest version available.
@@ -2591,6 +2597,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 Amazon EKS User Guide . CloudWatch Logs ingestion, archive storage, and data scanning rates
 apply to exported control plane logs. For more information, see CloudWatch Pricing.
- `"resourcesVpcConfig"`:
+- `"upgradePolicy"`: You can enable or disable extended support for clusters currently on
+ standard support. You cannot disable extended support once it starts. You must enable
+ extended support before your cluster exits standard support.
"""
function update_cluster_config(name; aws_config::AbstractAWSConfig=global_aws_config())
 return eks(
diff --git a/src/services/elastic_load_balancing_v2.jl b/src/services/elastic_load_balancing_v2.jl
index 32780708d9..53334a4eda 100644
--- a/src/services/elastic_load_balancing_v2.jl
+++ b/src/services/elastic_load_balancing_v2.jl
@@ -616,6 +616,49 @@ function delete_rule(
 )
end

+"""
+    delete_shared_trust_store_association(resource_arn, trust_store_arn)
+    delete_shared_trust_store_association(resource_arn, trust_store_arn, params::Dict{String,<:Any})
+
+Deletes a shared trust store association.
+
+# Arguments
+- `resource_arn`: The Amazon Resource Name (ARN) of the resource.
+- `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. + +""" +function delete_shared_trust_store_association( + ResourceArn, TrustStoreArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "DeleteSharedTrustStoreAssociation", + Dict{String,Any}("ResourceArn" => ResourceArn, "TrustStoreArn" => TrustStoreArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_shared_trust_store_association( + ResourceArn, + TrustStoreArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "DeleteSharedTrustStoreAssociation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ResourceArn" => ResourceArn, "TrustStoreArn" => TrustStoreArn + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_target_group(target_group_arn) delete_target_group(target_group_arn, params::Dict{String,<:Any}) @@ -770,6 +813,41 @@ function describe_account_limits( ) end +""" + describe_listener_attributes(listener_arn) + describe_listener_attributes(listener_arn, params::Dict{String,<:Any}) + +Describes the attributes for the specified listener. + +# Arguments +- `listener_arn`: The Amazon Resource Name (ARN) of the listener. + +""" +function describe_listener_attributes( + ListenerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "DescribeListenerAttributes", + Dict{String,Any}("ListenerArn" => ListenerArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_listener_attributes( + ListenerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "DescribeListenerAttributes", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ListenerArn" => ListenerArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_listener_certificates(listener_arn) describe_listener_certificates(listener_arn, params::Dict{String,<:Any}) @@ -1095,7 +1173,7 @@ Describes the health of the specified targets or all of your targets. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Include"`: Used to inclue anomaly detection information. +- `"Include"`: Used to include anomaly detection information. - `"Targets"`: The targets. """ function describe_target_health( @@ -1167,7 +1245,7 @@ end describe_trust_store_revocations(trust_store_arn) describe_trust_store_revocations(trust_store_arn, params::Dict{String,<:Any}) -Describes the revocation files in use by the specified trust store arn, or revocation ID. +Describes the revocation files in use by the specified trust store or revocation files. # Arguments - `trust_store_arn`: The Amazon Resource Name (ARN) of the trust store. @@ -1208,7 +1286,7 @@ end describe_trust_stores() describe_trust_stores(params::Dict{String,<:Any}) -Describes all trust stores for a given account by trust store arn’s or name. +Describes all trust stores for the specified account. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1234,6 +1312,39 @@ function describe_trust_stores( ) end +""" + get_resource_policy(resource_arn) + get_resource_policy(resource_arn, params::Dict{String,<:Any}) + +Retrieves the resource policy for a specified resource. 
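A minimal usage sketch for this new operation through AWS.jl's `@service` interface; the
module name follows the service file above, the ARN is a placeholder, and configured
credentials are assumed:

    using AWS
    @service Elastic_Load_Balancing_v2

    # Fetch the resource policy document attached to a shared trust store.
    policy = Elastic_Load_Balancing_v2.get_resource_policy(
        "arn:aws:elasticloadbalancing:us-east-1:123456789012:truststore/example/abc123"
    )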
+ +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. + +""" +function get_resource_policy(ResourceArn; aws_config::AbstractAWSConfig=global_aws_config()) + return elastic_load_balancing_v2( + "GetResourcePolicy", + Dict{String,Any}("ResourceArn" => ResourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_resource_policy( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "GetResourcePolicy", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceArn" => ResourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_trust_store_ca_certificates_bundle(trust_store_arn) get_trust_store_ca_certificates_bundle(trust_store_arn, params::Dict{String,<:Any}) @@ -1373,6 +1484,47 @@ function modify_listener( ) end +""" + modify_listener_attributes(attributes, listener_arn) + modify_listener_attributes(attributes, listener_arn, params::Dict{String,<:Any}) + +Modifies the specified attributes of the specified listener. + +# Arguments +- `attributes`: The listener attributes. +- `listener_arn`: The Amazon Resource Name (ARN) of the listener. + +""" +function modify_listener_attributes( + Attributes, ListenerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return elastic_load_balancing_v2( + "ModifyListenerAttributes", + Dict{String,Any}("Attributes" => Attributes, "ListenerArn" => ListenerArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_listener_attributes( + Attributes, + ListenerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return elastic_load_balancing_v2( + "ModifyListenerAttributes", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("Attributes" => Attributes, "ListenerArn" => ListenerArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_load_balancer_attributes(attributes, load_balancer_arn) modify_load_balancer_attributes(attributes, load_balancer_arn, params::Dict{String,<:Any}) @@ -1525,7 +1677,7 @@ end Modifies the specified attributes of the specified target group. # Arguments -- `attributes`: The attributes. +- `attributes`: The target group attributes. - `target_group_arn`: The Amazon Resource Name (ARN) of the target group. """ @@ -1565,7 +1717,7 @@ end modify_trust_store(ca_certificates_bundle_s3_bucket, ca_certificates_bundle_s3_key, trust_store_arn) modify_trust_store(ca_certificates_bundle_s3_bucket, ca_certificates_bundle_s3_key, trust_store_arn, params::Dict{String,<:Any}) -Update the ca certificate bundle for a given trust store. +Update the ca certificate bundle for the specified trust store. # Arguments - `ca_certificates_bundle_s3_bucket`: The Amazon S3 bucket for the ca certificates bundle. @@ -1810,7 +1962,10 @@ Sets the type of IP addresses used by the subnets of the specified load balancer - `ip_address_type`: Note: Internal load balancers must use the ipv4 IP address type. [Application Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses), dualstack (for IPv4 and IPv6 addresses), and dualstack-without-public-ipv4 - (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). [Network Load + (for IPv6 only public addresses, with private IPv4 and IPv6 addresses). 
Note: Application + Load Balancer authentication only supports IPv4 addresses when connecting to an Identity + Provider (IdP) or Amazon Cognito endpoint. Without a public IPv4 address the load balancer + cannot complete the authentication process, resulting in HTTP 500 errors. [Network Load Balancers] The IP address type. The possible values are ipv4 (for only IPv4 addresses) and dualstack (for IPv4 and IPv6 addresses). You can’t specify dualstack for a load balancer with a UDP or TCP_UDP listener. [Gateway Load Balancers] The IP address type. The possible diff --git a/src/services/elasticache.jl b/src/services/elasticache.jl index f1917987c5..3fec282e64 100644 --- a/src/services/elasticache.jl +++ b/src/services/elasticache.jl @@ -253,20 +253,21 @@ end copy_serverless_cache_snapshot(source_serverless_cache_snapshot_name, target_serverless_cache_snapshot_name) copy_serverless_cache_snapshot(source_serverless_cache_snapshot_name, target_serverless_cache_snapshot_name, params::Dict{String,<:Any}) -Creates a copy of an existing serverless cache’s snapshot. Available for Redis only. +Creates a copy of an existing serverless cache’s snapshot. Available for Redis OSS and +Serverless Memcached only. # Arguments - `source_serverless_cache_snapshot_name`: The identifier of the existing serverless - cache’s snapshot to be copied. Available for Redis only. + cache’s snapshot to be copied. Available for Redis OSS and Serverless Memcached only. - `target_serverless_cache_snapshot_name`: The identifier for the snapshot to be created. - Available for Redis only. + Available for Redis OSS and Serverless Memcached only. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"KmsKeyId"`: The identifier of the KMS key used to encrypt the target snapshot. - Available for Redis only. + Available for Redis OSS and Serverless Memcached only. - `"Tags"`: A list of tags to be added to the target snapshot resource. A tag is a - key-value pair. Available for Redis only. Default: NULL + key-value pair. Available for Redis OSS and Serverless Memcached only. Default: NULL """ function copy_serverless_cache_snapshot( SourceServerlessCacheSnapshotName, @@ -312,34 +313,35 @@ end copy_snapshot(source_snapshot_name, target_snapshot_name) copy_snapshot(source_snapshot_name, target_snapshot_name, params::Dict{String,<:Any}) -Makes a copy of an existing snapshot. This operation is valid for Redis only. Users or -groups that have permissions to use the CopySnapshot operation can create their own Amazon -S3 buckets and copy snapshots to it. To control access to your snapshots, use an IAM policy -to control who has the ability to use the CopySnapshot operation. For more information -about using IAM to control the use of ElastiCache operations, see Exporting Snapshots and -Authentication & Access Control. You could receive the following error messages. -Error Messages Error Message: The S3 bucket %s is outside of the region. Solution: -Create an Amazon S3 bucket in the same region as your snapshot. For more information, see -Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error Message: The S3 -bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same region as your -snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache -User Guide. Error Message: The S3 bucket %s is not owned by the authenticated user. +Makes a copy of an existing snapshot. This operation is valid for Redis OSS only. 
Users
+or groups that have permissions to use the CopySnapshot operation can create their own
+Amazon S3 buckets and copy snapshots to them. To control access to your snapshots, use an
+IAM policy to control who has the ability to use the CopySnapshot operation. For more
+information about using IAM to control the use of ElastiCache operations, see Exporting
+Snapshots and Authentication & Access Control. You could receive the following error
+messages.   Error Messages   Error Message: The S3 bucket %s is outside of the region.
Solution: Create an Amazon S3 bucket in the same region as your snapshot. For more
information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache User Guide. Error
+Message: The S3 bucket %s does not exist. Solution: Create an Amazon S3 bucket in the same
+region as your snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in
+the ElastiCache User Guide. Error Message: The S3 bucket %s is not owned by the
+authenticated user. Solution: Create an Amazon S3 bucket in the same region as your
+snapshot. For more information, see Step 1: Create an Amazon S3 Bucket in the ElastiCache
+User Guide. Error Message: The authenticated user does not have sufficient permissions
+to perform the desired activity. Solution: Contact your system administrator to get the
+needed permissions. Error Message: The S3 bucket %s already contains an object with key
+%s. Solution: Give the TargetSnapshotName a new and unique value. If exporting a snapshot,
+you could alternatively create a new Amazon S3 bucket and use this same value for
+TargetSnapshotName. Error Message: ElastiCache has not been granted READ permissions %s
+on the S3 Bucket. Solution: Add List and Read permissions on the bucket. For more
+information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the
+ElastiCache User Guide. Error Message: ElastiCache has not been granted WRITE
+permissions %s on the S3 Bucket. Solution: Add Upload/Delete permissions on the bucket.
+For more information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the
+ElastiCache User Guide. Error Message: ElastiCache has not been granted READ_ACP
+permissions %s on the S3 Bucket. Solution: Add View Permissions on the bucket.
For more +information, see Step 2: Grant ElastiCache Access to Your Amazon S3 Bucket in the +ElastiCache User Guide. # Arguments - `source_snapshot_name`: The name of an existing snapshot from which to make a copy. @@ -402,8 +404,8 @@ end create_cache_cluster(cache_cluster_id, params::Dict{String,<:Any}) Creates a cluster. All nodes in the cluster run the same protocol-compliant cache engine -software, either Memcached or Redis. This operation is not supported for Redis (cluster -mode enabled) clusters. +software, either Memcached or Redis OSS. This operation is not supported for Redis OSS +(cluster mode enabled) clusters. # Arguments - `cache_cluster_id`: The node group (shard) identifier. This parameter is stored as a @@ -423,8 +425,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys special characters are !, &, #, , ^, <, >, and -. Other printable special characters cannot be used in the AUTH token. For more information, see AUTH password at http://redis.io/commands/AUTH. -- `"AutoMinorVersionUpgrade"`:  If you are running Redis engine version 6.0 or later, set - this parameter to yes if you want to opt-in to the next auto minor version upgrade +- `"AutoMinorVersionUpgrade"`:  If you are running Redis OSS engine version 6.0 or later, + set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. - `"CacheNodeType"`: The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the @@ -432,15 +434,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys compared to their equivalent previous generation counterparts. General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region - availability, see Supported Node Types M6g node types (available only for Redis engine - version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, - cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, + availability, see Supported Node Types M6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge - T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine - version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: - cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, + T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached + engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node + types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) 
T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, @@ -450,9 +452,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region - availability, see Supported Node Types R6g node types (available only for Redis engine - version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, - cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, + availability, see Supported Node Types R6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, @@ -461,9 +463,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. - Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ - with automatic failover is not supported on T1 instances. Redis configuration variables - appendonly and appendfsync are not supported on Redis version 2.8.22 and later. + Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS + Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS + configuration variables appendonly and appendfsync are not supported on Redis OSS version + 2.8.22 and later. - `"CacheParameterGroupName"`: The name of the parameter group to associate with this cluster. If this argument is omitted, the default parameter group for the specified engine is used. You cannot use any parameter group which has cluster-enabled='yes' when creating a @@ -485,17 +488,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys earlier engine version, you must delete the existing cluster or replication group and create it anew with the earlier engine version. - `"IpDiscovery"`: The network type you choose when modifying a cluster, either ipv4 | - ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached - engine version 1.6.6 on all instances built on the Nitro system. + ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or + Memcached engine version 1.6.6 on all instances built on the Nitro system. - `"LogDeliveryConfigurations"`: Specifies the destination, format and type of the logs. - `"NetworkType"`: Must be either ipv4 | ipv6 | dual_stack. IPv6 is supported for workloads - using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances - built on the Nitro system. + using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all + instances built on the Nitro system. 
- `"NotificationTopicArn"`: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. The Amazon SNS topic owner must be the same as the cluster owner. - `"NumCacheNodes"`: The initial number of cache nodes that the cluster has. For clusters - running Redis, this value must be 1. For clusters running Memcached, this value must be + running Redis OSS, this value must be 1. For clusters running Memcached, this value must be between 1 and 40. If you need more than 40 nodes for your Memcached cluster, please fill out the ElastiCache Limit Increase Request form at http://aws.amazon.com/contact-us/elasticache-node-limit-request/. @@ -531,12 +534,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys this parameter only when you are creating a cluster in an Amazon Virtual Private Cloud (Amazon VPC). - `"SnapshotArns"`: A single-element string list containing an Amazon Resource Name (ARN) - that uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot file - is used to populate the node group (shard). The Amazon S3 object name in the ARN cannot - contain any commas. This parameter is only valid if the Engine parameter is redis. + that uniquely identifies a Redis OSS RDB snapshot file stored in Amazon S3. The snapshot + file is used to populate the node group (shard). The Amazon S3 object name in the ARN + cannot contain any commas. This parameter is only valid if the Engine parameter is redis. Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb -- `"SnapshotName"`: The name of a Redis snapshot from which to restore data into the new - node group (shard). The snapshot status changes to restoring while the new node group +- `"SnapshotName"`: The name of a Redis OSS snapshot from which to restore data into the + new node group (shard). The snapshot status changes to restoring while the new node group (shard) is being created. This parameter is only valid if the Engine parameter is redis. - `"SnapshotRetentionLimit"`: The number of days for which ElastiCache retains automatic snapshots before deleting them. For example, if you set SnapshotRetentionLimit to 5, a @@ -763,11 +766,11 @@ end create_global_replication_group(global_replication_group_id_suffix, primary_replication_group_id) create_global_replication_group(global_replication_group_id_suffix, primary_replication_group_id, params::Dict{String,<:Any}) -Global Datastore for Redis offers fully managed, fast, reliable and secure cross-region -replication. Using Global Datastore for Redis, you can create cross-region read replica -clusters for ElastiCache for Redis to enable low-latency reads and disaster recovery across -regions. For more information, see Replication Across Regions Using Global Datastore. -The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The +Global Datastore for Redis OSS offers fully managed, fast, reliable and secure cross-region +replication. Using Global Datastore for Redis OSS, you can create cross-region read replica +clusters for ElastiCache (Redis OSS) to enable low-latency reads and disaster recovery +across regions. For more information, see Replication Across Regions Using Global +Datastore. The GlobalReplicationGroupIdSuffix is the name of the Global datastore. The PrimaryReplicationGroupId represents the name of the primary cluster that accepts writes and will replicate updates to the secondary cluster. 
@@ -828,29 +831,29 @@ end
     create_replication_group(replication_group_description, replication_group_id)
     create_replication_group(replication_group_description, replication_group_id, params::Dict{String,<:Any})

-Creates a Redis (cluster mode disabled) or a Redis (cluster mode enabled) replication
-group. This API can be used to create a standalone regional replication group or a
-secondary replication group associated with a Global datastore. A Redis (cluster mode
-disabled) replication group is a collection of nodes, where one of the nodes is a
+Creates a Redis OSS (cluster mode disabled) or a Redis OSS (cluster mode enabled)
+replication group. This API can be used to create a standalone regional replication group
+or a secondary replication group associated with a Global datastore. A Redis OSS (cluster
+mode disabled) replication group is a collection of nodes, where one of the nodes is a
 read/write primary and the others are read-only replicas. Writes to the primary are
-asynchronously propagated to the replicas. A Redis cluster-mode enabled cluster is
+asynchronously propagated to the replicas. A Redis OSS cluster-mode enabled cluster is
 comprised of from 1 to 90 shards (API/CLI: node groups). Each shard has a primary node and
 up to 5 read-only replica nodes. The configuration can range from 90 shards and 0 replicas
 to 15 shards and 5 replicas, which is the maximum number of replicas allowed. The node or
-shard limit can be increased to a maximum of 500 per cluster if the Redis engine version is
-5.0.6 or higher. For example, you can choose to configure a 500 node cluster that ranges
-between 83 shards (one primary and 5 replicas per shard) and 500 shards (single primary and
-no replicas). Make sure there are enough available IP addresses to accommodate the
-increase. Common pitfalls include the subnets in the subnet group have too small a CIDR
+shard limit can be increased to a maximum of 500 per cluster if the Redis OSS engine
+version is 5.0.6 or higher. For example, you can choose to configure a 500 node cluster
+that ranges between 83 shards (one primary and 5 replicas per shard) and 500 shards (single
+primary and no replicas). Make sure there are enough available IP addresses to accommodate
+the increase. Common pitfalls include the subnets in the subnet group have too small a CIDR
 range or the subnets are shared and heavily used by other clusters. For more information,
 see Creating a Subnet Group. For versions below 5.0.6, the limit is 250 per cluster. To
 request a limit increase, see Amazon Service Limits and choose the limit type Nodes per
-cluster per instance type. When a Redis (cluster mode disabled) replication group has been
-successfully created, you can add one or more read replicas to it, up to a total of 5 read
-replicas. If you need to increase or decrease the number of node groups (console: shards),
-you can avail yourself of ElastiCache for Redis' scaling. For more information, see Scaling
-ElastiCache for Redis Clusters in the ElastiCache User Guide. This operation is valid for
-Redis only.
+cluster per instance type. When a Redis OSS (cluster mode disabled) replication group has
+been successfully created, you can add one or more read replicas to it, up to a total of 5
+read replicas. If you need to increase or decrease the number of node groups (console:
+shards), you can use ElastiCache (Redis OSS) scaling. For more information, see Scaling
+ElastiCache (Redis OSS) Clusters in the ElastiCache User Guide. This operation is valid
+for Redis OSS only.
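For orientation, a hedged sketch of creating a Redis OSS (cluster mode enabled) group
through AWS.jl; names, node type, and shard counts are placeholders, `@service` loading and
credentials are assumed, and `ReplicasPerNodeGroup` is taken from the CreateReplicationGroup
API rather than from the excerpt above:

    using AWS
    @service ElastiCache

    # Two shards (node groups), one replica per shard, automatic failover enabled.
    ElastiCache.create_replication_group(
        "example cluster-mode-enabled group",  # ReplicationGroupDescription
        "example-repl-group",                  # ReplicationGroupId
        Dict(
            "Engine" => "redis",
            "CacheNodeType" => "cache.m7g.large",
            "NumNodeGroups" => 2,
            "ReplicasPerNodeGroup" => 1,
            "AutomaticFailoverEnabled" => true,
        ),
    )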
# Arguments - `replication_group_description`: A user-created description for the replication group. @@ -865,7 +868,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys cannot modify the value of AtRestEncryptionEnabled after the replication group is created. To enable encryption at rest on a replication group you must set AtRestEncryptionEnabled to true when you create the replication group. Required: Only available when creating a - replication group in an Amazon VPC using redis version 3.2.6, 4.x or later. Default: false + replication group in an Amazon VPC using Redis OSS version 3.2.6, 4.x or later. Default: + false - `"AuthToken"`: Reserved parameter. The password used to access a password protected server. AuthToken can be specified only on replication groups where TransitEncryptionEnabled is true. For HIPAA compliance, you must specify @@ -875,27 +879,27 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys characters are !, &, #, , ^, <, >, and -. Other printable special characters cannot be used in the AUTH token. For more information, see AUTH password at http://redis.io/commands/AUTH. -- `"AutoMinorVersionUpgrade"`:  If you are running Redis engine version 6.0 or later, set - this parameter to yes if you want to opt-in to the next auto minor version upgrade +- `"AutoMinorVersionUpgrade"`:  If you are running Redis OSS engine version 6.0 or later, + set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. - `"AutomaticFailoverEnabled"`: Specifies whether a read-only replica is automatically promoted to read/write primary if the existing primary fails. AutomaticFailoverEnabled - must be enabled for Redis (cluster mode enabled) replication groups. Default: false + must be enabled for Redis OSS (cluster mode enabled) replication groups. Default: false - `"CacheNodeType"`: The compute and memory capacity of the nodes in the node group (shard). The following node types are supported by ElastiCache. Generally speaking, the current generation types provide more memory and computational power at lower cost when compared to their equivalent previous generation counterparts. 
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region - availability, see Supported Node Types M6g node types (available only for Redis engine - version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.m6g.large, - cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, + availability, see Supported Node Types M6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge - T4g node types (available only for Redis engine version 5.0.6 onward and Memcached engine - version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: - cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, + T4g node types (available only for Redis OSS engine version 5.0.6 onward and Memcached + engine version 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node + types: cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters are still supported but creation of new clusters is not supported for these types.) T1 node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, @@ -905,9 +909,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region - availability, see Supported Node Types R6g node types (available only for Redis engine - version 5.0.6 onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, - cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, + availability, see Supported Node Types R6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, @@ -916,16 +920,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All current generation instance types are created in Amazon VPC by default. - Redis append-only files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ - with automatic failover is not supported on T1 instances. Redis configuration variables - appendonly and appendfsync are not supported on Redis version 2.8.22 and later. 
+ Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS + Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS + configuration variables appendonly and appendfsync are not supported on Redis OSS version + 2.8.22 and later. - `"CacheParameterGroupName"`: The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the - specified engine is used. If you are running Redis version 3.2.4 or later, only one node - group (shard), and want to use a default parameter group, we recommend that you specify the - parameter group by name. To create a Redis (cluster mode disabled) replication group, - use CacheParameterGroupName=default.redis3.2. To create a Redis (cluster mode enabled) - replication group, use CacheParameterGroupName=default.redis3.2.cluster.on. + specified engine is used. If you are running Redis OSS version 3.2.4 or later, only one + node group (shard), and want to use a default parameter group, we recommend that you + specify the parameter group by name. To create a Redis OSS (cluster mode disabled) + replication group, use CacheParameterGroupName=default.redis3.2. To create a Redis OSS + (cluster mode enabled) replication group, use + CacheParameterGroupName=default.redis3.2.cluster.on. - `"CacheSecurityGroupNames"`: A list of cache security group names to associate with this replication group. - `"CacheSubnetGroupName"`: The name of the cache subnet group to be used for the @@ -933,10 +939,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys create a subnet group before you start creating a cluster. For more information, see Subnets and Subnet Groups. - `"ClusterMode"`: Enabled or Disabled. To modify cluster mode from Disabled to Enabled, - you must first set the cluster mode to Compatible. Compatible mode allows your Redis + you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you - migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode - configuration and set the cluster mode to Enabled. + migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster + mode configuration and set the cluster mode to Enabled. - `"DataTieringEnabled"`: Enables data tiering. Data tiering is only supported for replication groups using the r6gd node type. This parameter must be set to true when using r6gd nodes. For more information, see Data tiering. @@ -951,23 +957,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the earlier engine version. - `"GlobalReplicationGroupId"`: The name of the Global datastore - `"IpDiscovery"`: The network type you choose when creating a replication group, either - ipv4 | ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or + ipv4 | ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all instances built on the Nitro system. - `"KmsKeyId"`: The ID of the KMS key used to encrypt the disk in the cluster. - `"LogDeliveryConfigurations"`: Specifies the destination, format and type of the logs. - `"MultiAZEnabled"`: A flag indicating if you have Multi-AZ enabled to enhance fault tolerance. For more information, see Minimizing Downtime: Multi-AZ. - `"NetworkType"`: Must be either ipv4 | ipv6 | dual_stack. 
IPv6 is supported for workloads - using Redis engine version 6.2 onward or Memcached engine version 1.6.6 on all instances - built on the Nitro system. + using Redis OSS engine version 6.2 onward or Memcached engine version 1.6.6 on all + instances built on the Nitro system. - `"NodeGroupConfiguration"`: A list of node group (shard) configuration options. Each node group (shard) configuration has the following members: PrimaryAvailabilityZone, - ReplicaAvailabilityZones, ReplicaCount, and Slots. If you're creating a Redis (cluster mode - disabled) or a Redis (cluster mode enabled) replication group, you can use this parameter - to individually configure each node group (shard), or you can omit this parameter. However, - it is required when seeding a Redis (cluster mode enabled) cluster from a S3 rdb file. You - must configure each node group (shard) using this parameter because you must specify the - slots for each node group. + ReplicaAvailabilityZones, ReplicaCount, and Slots. If you're creating a Redis OSS (cluster + mode disabled) or a Redis OSS (cluster mode enabled) replication group, you can use this + parameter to individually configure each node group (shard), or you can omit this + parameter. However, it is required when seeding a Redis OSS (cluster mode enabled) cluster + from a S3 rdb file. You must configure each node group (shard) using this parameter because + you must specify the slots for each node group. - `"NotificationTopicArn"`: The Amazon Resource Name (ARN) of the Amazon Simple Notification Service (SNS) topic to which notifications are sent. The Amazon SNS topic owner must be the same as the cluster owner. @@ -978,8 +984,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameter (it will default to 1), or you can explicitly set it to a value between 2 and 6. The maximum permitted value for NumCacheClusters is 6 (1 primary plus 5 replicas). - `"NumNodeGroups"`: An optional parameter that specifies the number of node groups - (shards) for this Redis (cluster mode enabled) replication group. For Redis (cluster mode - disabled) either omit this parameter or set it to 1. Default: 1 + (shards) for this Redis OSS (cluster mode enabled) replication group. For Redis OSS + (cluster mode disabled) either omit this parameter or set it to 1. Default: 1 - `"Port"`: The port number on which each member of the replication group accepts connections. - `"PreferredCacheClusterAZs"`: A list of EC2 Availability Zones in which the replication @@ -1005,9 +1011,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud (Amazon VPC). - `"ServerlessCacheSnapshotName"`: The name of the snapshot used to create a replication - group. Available for Redis only. + group. Available for Redis OSS only. - `"SnapshotArns"`: A list of Amazon Resource Names (ARN) that uniquely identify the Redis - RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new + OSS RDB snapshot files stored in Amazon S3. The snapshot files are used to populate the new replication group. The Amazon S3 object name in the ARN cannot contain any commas. 
The new replication group will have the number of node groups (console: shards) specified by the parameter NumNodeGroups or the number of node groups configured by NodeGroupConfiguration @@ -1031,13 +1037,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys This parameter is valid only if the Engine parameter is redis, the EngineVersion parameter is 3.2.6, 4.x or later, and the cluster is being created in an Amazon VPC. If you enable in-transit encryption, you must also specify a value for CacheSubnetGroup. Required: Only - available when creating a replication group in an Amazon VPC using redis version 3.2.6, 4.x - or later. Default: false For HIPAA compliance, you must specify TransitEncryptionEnabled - as true, an AuthToken, and a CacheSubnetGroup. + available when creating a replication group in an Amazon VPC using Redis OSS version 3.2.6, + 4.x or later. Default: false For HIPAA compliance, you must specify + TransitEncryptionEnabled as true, an AuthToken, and a CacheSubnetGroup. - `"TransitEncryptionMode"`: A setting that allows you to migrate your clients to use in-transit encryption, with no downtime. When setting TransitEncryptionEnabled to true, you can set your TransitEncryptionMode to preferred in the same request, to allow both - encrypted and unencrypted connections at the same time. Once you migrate all your Redis + encrypted and unencrypted connections at the same time. Once you migrate all your Redis OSS clients to use encrypted connections you can modify the value to required to allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step process that requires you to first set the TransitEncryptionMode to preferred, after that you can @@ -1100,7 +1106,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Units for the cache. - `"DailySnapshotTime"`: The daily time that snapshots will be created from the new serverless cache. By default this number is populated with 0, i.e. no snapshots will be - created on an automatic daily basis. Available for Redis only. + created on an automatic daily basis. Available for Redis OSS and Serverless Memcached only. - `"Description"`: User-provided description for the serverless cache. The default is NULL, i.e. if no description is provided then an empty string will be returned. The maximum length is 255 characters. @@ -1113,16 +1119,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys end-point (private-link). If no other information is given this will be the VPC’s Default Security Group that is associated with the cluster VPC end-point. - `"SnapshotArnsToRestore"`: The ARN(s) of the snapshot that the new serverless cache will - be created from. Available for Redis only. + be created from. Available for Redis OSS and Serverless Memcached only. - `"SnapshotRetentionLimit"`: The number of snapshots that will be retained for the serverless cache that is being created. As new snapshots beyond this limit are added, the - oldest snapshots will be deleted on a rolling basis. Available for Redis only. + oldest snapshots will be deleted on a rolling basis. Available for Redis OSS and Serverless + Memcached only. - `"SubnetIds"`: A list of the identifiers of the subnets where the VPC endpoint for the serverless cache will be deployed. All the subnetIds must belong to the same VPC. - `"Tags"`: The list of tags (key, value) pairs to be added to the serverless cache resource. Default is NULL. 
- `"UserGroupId"`: The identifier of the UserGroup to be associated with the serverless - cache. Available for Redis only. Default is NULL. + cache. Available for Redis OSS only. Default is NULL. """ function create_serverless_cache( Engine, ServerlessCacheName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1161,20 +1168,21 @@ end create_serverless_cache_snapshot(serverless_cache_name, serverless_cache_snapshot_name, params::Dict{String,<:Any}) This API creates a copy of an entire ServerlessCache at a specific moment in time. -Available for Redis only. +Available for Redis OSS and Serverless Memcached only. # Arguments - `serverless_cache_name`: The name of an existing serverless cache. The snapshot is - created from this cache. Available for Redis only. + created from this cache. Available for Redis OSS and Serverless Memcached only. - `serverless_cache_snapshot_name`: The name for the snapshot being created. Must be unique - for the customer account. Available for Redis only. Must be between 1 and 255 characters. + for the customer account. Available for Redis OSS and Serverless Memcached only. Must be + between 1 and 255 characters. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"KmsKeyId"`: The ID of the KMS key used to encrypt the snapshot. Available for Redis - only. Default: NULL +- `"KmsKeyId"`: The ID of the KMS key used to encrypt the snapshot. Available for Redis OSS + and Serverless Memcached only. Default: NULL - `"Tags"`: A list of tags to be added to the snapshot resource. A tag is a key-value pair. - Available for Redis only. + Available for Redis OSS and Serverless Memcached only. """ function create_serverless_cache_snapshot( ServerlessCacheName, @@ -1219,7 +1227,7 @@ end create_snapshot(snapshot_name, params::Dict{String,<:Any}) Creates a copy of an entire cluster or replication group at a specific moment in time. -This operation is valid for Redis only. +This operation is valid for Redis OSS only. # Arguments - `snapshot_name`: A name for the snapshot being created. @@ -1261,8 +1269,8 @@ end create_user(access_string, engine, user_id, user_name) create_user(access_string, engine, user_id, user_name, params::Dict{String,<:Any}) -For Redis engine version 6.0 onwards: Creates a Redis user. For more information, see Using -Role Based Access Control (RBAC). +For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user. For more information, +see Using Role Based Access Control (RBAC). # Arguments - `access_string`: Access permissions string used for this user. @@ -1329,17 +1337,18 @@ end create_user_group(engine, user_group_id) create_user_group(engine, user_group_id, params::Dict{String,<:Any}) -For Redis engine version 6.0 onwards: Creates a Redis user group. For more information, see -Using Role Based Access Control (RBAC) +For Redis OSS engine version 6.0 onwards: Creates a Redis OSS user group. For more +information, see Using Role Based Access Control (RBAC) # Arguments -- `engine`: The current supported value is Redis. +- `engine`: The current supported value is Redis user. - `user_group_id`: The ID of the user group. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Tags"`: A list of tags to be added to this resource. A tag is a key-value pair. A tag - key must be accompanied by a tag value, although null is accepted. Available for Redis only. + key must be accompanied by a tag value, although null is accepted. 
Available for Redis OSS + only. - `"UserIds"`: The list of user IDs that belong to the user group. """ function create_user_group( @@ -1390,12 +1399,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"GlobalNodeGroupsToRemove"`: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. - ElastiCache for Redis will attempt to remove all node groups listed by + ElastiCache (Redis OSS) will attempt to remove all node groups listed by GlobalNodeGroupsToRemove from the cluster. - `"GlobalNodeGroupsToRetain"`: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. GlobalNodeGroupsToRetain is a list of NodeGroupIds to retain from the cluster. - ElastiCache for Redis will attempt to retain all node groups listed by + ElastiCache (Redis OSS) will attempt to retain all node groups listed by GlobalNodeGroupsToRetain from the cluster. """ function decrease_node_groups_in_global_replication_group( @@ -1444,10 +1453,10 @@ end decrease_replica_count(apply_immediately, replication_group_id) decrease_replica_count(apply_immediately, replication_group_id, params::Dict{String,<:Any}) -Dynamically decreases the number of replicas in a Redis (cluster mode disabled) replication -group or the number of replica nodes in one or more node groups (shards) of a Redis -(cluster mode enabled) replication group. This operation is performed with no cluster down -time. +Dynamically decreases the number of replicas in a Redis OSS (cluster mode disabled) +replication group or the number of replica nodes in one or more node groups (shards) of a +Redis OSS (cluster mode enabled) replication group. This operation is performed with no +cluster down time. # Arguments - `apply_immediately`: If True, the number of replica nodes is decreased immediately. @@ -1458,15 +1467,15 @@ time. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"NewReplicaCount"`: The number of read replica nodes you want at the completion of this - operation. For Redis (cluster mode disabled) replication groups, this is the number of - replica nodes in the replication group. For Redis (cluster mode enabled) replication + operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of + replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. - The minimum number of replicas in a shard or replication group is: Redis (cluster mode - disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Redis (cluster - mode enabled): 0 (though you will not be able to failover to a replica if your primary node - fails) + The minimum number of replicas in a shard or replication group is: Redis OSS (cluster + mode disabled) If Multi-AZ is enabled: 1 If Multi-AZ is not enabled: 0 Redis OSS + (cluster mode enabled): 0 (though you will not be able to failover to a replica if your + primary node fails) - `"ReplicaConfiguration"`: A list of ConfigureShard objects that can be used to configure - each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has + each shard in a Redis OSS (cluster mode enabled) replication group. 
The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. - `"ReplicasToRemove"`: A list of the node ids to remove from the replication group or node group (shard). @@ -1514,10 +1523,10 @@ end Deletes a previously provisioned cluster. DeleteCacheCluster deletes all associated cache nodes, node endpoints and the cluster itself. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the cluster; you cannot -cancel or revert this operation. This operation is not valid for: Redis (cluster mode -enabled) clusters Redis (cluster mode disabled) clusters A cluster that is the last +cancel or revert this operation. This operation is not valid for: Redis OSS (cluster mode +enabled) clusters Redis OSS (cluster mode disabled) clusters A cluster that is the last read replica of a replication group A cluster that is the primary node of a replication -group A node group (shard) that has Multi-AZ mode enabled A cluster from a Redis +group A node group (shard) that has Multi-AZ mode enabled A cluster from a Redis OSS (cluster mode enabled) replication group A cluster that is not in the available state # Arguments @@ -1748,8 +1757,9 @@ replication group, including the primary/primaries and all of the read replicas. replication group has only one primary, you can optionally delete only the read replicas, while retaining the primary by setting RetainPrimaryCluster=true. When you receive a successful response from this operation, Amazon ElastiCache immediately begins deleting the -selected resources; you cannot cancel or revert this operation. This operation is valid -for Redis only. +selected resources; you cannot cancel or revert this operation. CreateSnapshot +permission is required to create a final snapshot. Without this permission, the API call +will fail with an Access Denied exception. This operation is valid for Redis OSS only. # Arguments - `replication_group_id`: The identifier for the cluster to be deleted. This parameter is @@ -1795,7 +1805,9 @@ end delete_serverless_cache(serverless_cache_name) delete_serverless_cache(serverless_cache_name, params::Dict{String,<:Any}) -Deletes a specified existing serverless cache. +Deletes a specified existing serverless cache. CreateServerlessCacheSnapshot permission +is required to create a final snapshot. Without this permission, the API call will fail +with an Access Denied exception. # Arguments - `serverless_cache_name`: The identifier of the serverless cache to be deleted. @@ -1803,7 +1815,8 @@ Deletes a specified existing serverless cache. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"FinalSnapshotName"`: Name of the final snapshot to be taken before the serverless cache - is deleted. Available for Redis only. Default: NULL, i.e. a final snapshot is not taken. + is deleted. Available for Redis OSS and Serverless Memcached only. Default: NULL, i.e. a + final snapshot is not taken. """ function delete_serverless_cache( ServerlessCacheName; aws_config::AbstractAWSConfig=global_aws_config() @@ -1838,11 +1851,12 @@ end delete_serverless_cache_snapshot(serverless_cache_snapshot_name) delete_serverless_cache_snapshot(serverless_cache_snapshot_name, params::Dict{String,<:Any}) -Deletes an existing serverless cache snapshot. Available for Redis only. +Deletes an existing serverless cache snapshot. Available for Redis OSS and Serverless +Memcached only. 
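A minimal AWS.jl sketch for this operation; the snapshot name is a placeholder, and
`@service` loading plus configured credentials are assumed:

    using AWS
    @service ElastiCache

    # Delete a serverless cache snapshot by name.
    ElastiCache.delete_serverless_cache_snapshot("example-snapshot-1")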
# Arguments
- `serverless_cache_snapshot_name`: Identifier of the snapshot to be deleted. Available for
-  Redis only.
+  Redis OSS and Serverless Memcached only.

 """
 function delete_serverless_cache_snapshot(
@@ -1882,7 +1896,7 @@ end

 Deletes an existing snapshot. When you receive a successful response from this operation,
 ElastiCache immediately begins deleting the snapshot; you cannot cancel or revert this
-operation. This operation is valid for Redis only.
+operation. This operation is valid for Redis OSS only.

 # Arguments
 - `snapshot_name`: The name of the snapshot to be deleted.
@@ -1915,7 +1929,7 @@ end
     delete_user(user_id)
     delete_user(user_id, params::Dict{String,<:Any})

-For Redis engine version 6.0 onwards: Deletes a user. The user will be removed from all
+For Redis OSS engine version 6.0 onwards: Deletes a user. The user will be removed from all
 user groups and in turn removed from all replication groups. For more information, see
 Using Role Based Access Control (RBAC).

@@ -1946,8 +1960,8 @@ end
     delete_user_group(user_group_id)
     delete_user_group(user_group_id, params::Dict{String,<:Any})

-For Redis engine version 6.0 onwards: Deletes a user group. The user group must first be
-disassociated from the replication group before it can be deleted. For more information,
+For Redis OSS engine version 6.0 onwards: Deletes a user group. The user group must first
+be disassociated from the replication group before it can be deleted. For more information,
 see Using Role Based Access Control (RBAC).

 # Arguments
@@ -2007,7 +2021,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
  remaining results can be retrieved. Default: 100 Constraints: minimum 20; maximum 100.
 - `"ShowCacheClustersNotInReplicationGroups"`: An optional flag that can be included in the
  DescribeCacheCluster request to show only nodes (API/CLI: clusters) that are not members of
-  a replication group. In practice, this mean Memcached and single node Redis clusters.
+  a replication group. In practice, this means Memcached and single node Redis OSS clusters.
 - `"ShowCacheNodeInfo"`: An optional flag that can be included in the DescribeCacheCluster
  request to retrieve information about the individual cache nodes.
 """
@@ -2360,7 +2374,7 @@ end

 Returns information about a particular replication group. If no identifier is specified,
 DescribeReplicationGroups returns information about all replication groups. This operation
-is valid for Redis only.
+is valid for Redis OSS only.

 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -2406,38 +2420,39 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 generation counterparts.
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types - M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached - engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node - types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for - Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): - cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, - cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, - cache.t2.medium Previous generation: (not recommended. Existing clusters are still - supported but creation of new clusters is not supported for these types.) T1 node types: - cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing - clusters are still supported but creation of new clusters is not supported for these - types.) C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g - node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, - cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see - Supported Node Types R6g node types (available only for Redis engine version 5.0.6 - onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, - cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, - cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, - cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - Previous generation: (not recommended. Existing clusters are still supported but creation - of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, - cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All - current generation instance types are created in Amazon VPC by default. Redis append-only - files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic - failover is not supported on T1 instances. Redis configuration variables appendonly and - appendfsync are not supported on Redis version 2.8.22 and later. 
+ M6g node types (available only for Redis OSS engine version 5.0.6 onward and for + Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, + cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types + (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version + 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: + cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, + cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for these types.) T1 + node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, + cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, + cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not + recommended. Existing clusters are still supported but creation of new clusters is not + supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: + Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, + cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region + availability, see Supported Node Types R6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, + cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, + cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still + supported but creation of new clusters is not supported for these types.) M2 node types: + cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, + cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node + type info All current generation instance types are created in Amazon VPC by default. + Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS + Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS + configuration variables appendonly and appendfsync are not supported on Redis OSS version + 2.8.22 and later. - `"Duration"`: The duration filter value, specified in years or seconds. Use this parameter to show only reservations for this duration. Valid Values: 1 | 3 | 31536000 | 94608000 @@ -2489,38 +2504,39 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys generation counterparts. 
General purpose: Current generation: M7g node types: cache.m7g.large, cache.m7g.xlarge, cache.m7g.2xlarge, cache.m7g.4xlarge, cache.m7g.8xlarge, cache.m7g.12xlarge, cache.m7g.16xlarge For region availability, see Supported Node Types - M6g node types (available only for Redis engine version 5.0.6 onward and for Memcached - engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, cache.m6g.2xlarge, - cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, cache.m6g.16xlarge M5 node - types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, cache.m5.4xlarge, - cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, cache.m4.xlarge, - cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types (available only for - Redis engine version 5.0.6 onward and Memcached engine version 1.5.16 onward): - cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: cache.t3.micro, - cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, cache.t2.small, - cache.t2.medium Previous generation: (not recommended. Existing clusters are still - supported but creation of new clusters is not supported for these types.) T1 node types: - cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, cache.m1.large, - cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, cache.m3.xlarge, - cache.m3.2xlarge Compute optimized: Previous generation: (not recommended. Existing - clusters are still supported but creation of new clusters is not supported for these - types.) C1 node types: cache.c1.xlarge Memory optimized: Current generation: R7g - node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, cache.r7g.4xlarge, - cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region availability, see - Supported Node Types R6g node types (available only for Redis engine version 5.0.6 - onward and for Memcached engine version 1.5.16 onward): cache.r6g.large, cache.r6g.xlarge, - cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, cache.r6g.12xlarge, - cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, cache.r5.2xlarge, - cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: cache.r4.large, - cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, cache.r4.16xlarge - Previous generation: (not recommended. Existing clusters are still supported but creation - of new clusters is not supported for these types.) M2 node types: cache.m2.xlarge, - cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, cache.r3.xlarge, - cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node type info All - current generation instance types are created in Amazon VPC by default. Redis append-only - files (AOF) are not supported for T1 or T2 instances. Redis Multi-AZ with automatic - failover is not supported on T1 instances. Redis configuration variables appendonly and - appendfsync are not supported on Redis version 2.8.22 and later. 
+ M6g node types (available only for Redis OSS engine version 5.0.6 onward and for + Memcached engine version 1.5.16 onward): cache.m6g.large, cache.m6g.xlarge, + cache.m6g.2xlarge, cache.m6g.4xlarge, cache.m6g.8xlarge, cache.m6g.12xlarge, + cache.m6g.16xlarge M5 node types: cache.m5.large, cache.m5.xlarge, cache.m5.2xlarge, + cache.m5.4xlarge, cache.m5.12xlarge, cache.m5.24xlarge M4 node types: cache.m4.large, + cache.m4.xlarge, cache.m4.2xlarge, cache.m4.4xlarge, cache.m4.10xlarge T4g node types + (available only for Redis OSS engine version 5.0.6 onward and Memcached engine version + 1.5.16 onward): cache.t4g.micro, cache.t4g.small, cache.t4g.medium T3 node types: + cache.t3.micro, cache.t3.small, cache.t3.medium T2 node types: cache.t2.micro, + cache.t2.small, cache.t2.medium Previous generation: (not recommended. Existing clusters + are still supported but creation of new clusters is not supported for these types.) T1 + node types: cache.t1.micro M1 node types: cache.m1.small, cache.m1.medium, + cache.m1.large, cache.m1.xlarge M3 node types: cache.m3.medium, cache.m3.large, + cache.m3.xlarge, cache.m3.2xlarge Compute optimized: Previous generation: (not + recommended. Existing clusters are still supported but creation of new clusters is not + supported for these types.) C1 node types: cache.c1.xlarge Memory optimized: + Current generation: R7g node types: cache.r7g.large, cache.r7g.xlarge, cache.r7g.2xlarge, + cache.r7g.4xlarge, cache.r7g.8xlarge, cache.r7g.12xlarge, cache.r7g.16xlarge For region + availability, see Supported Node Types R6g node types (available only for Redis OSS + engine version 5.0.6 onward and for Memcached engine version 1.5.16 onward): + cache.r6g.large, cache.r6g.xlarge, cache.r6g.2xlarge, cache.r6g.4xlarge, cache.r6g.8xlarge, + cache.r6g.12xlarge, cache.r6g.16xlarge R5 node types: cache.r5.large, cache.r5.xlarge, + cache.r5.2xlarge, cache.r5.4xlarge, cache.r5.12xlarge, cache.r5.24xlarge R4 node types: + cache.r4.large, cache.r4.xlarge, cache.r4.2xlarge, cache.r4.4xlarge, cache.r4.8xlarge, + cache.r4.16xlarge Previous generation: (not recommended. Existing clusters are still + supported but creation of new clusters is not supported for these types.) M2 node types: + cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge R3 node types: cache.r3.large, + cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, cache.r3.8xlarge Additional node + type info All current generation instance types are created in Amazon VPC by default. + Redis OSS append-only files (AOF) are not supported for T1 or T2 instances. Redis OSS + Multi-AZ with automatic failover is not supported on T1 instances. Redis OSS + configuration variables appendonly and appendfsync are not supported on Redis OSS version + 2.8.22 and later. - `"Duration"`: Duration filter value, specified in years or seconds. Use this parameter to show only reservations for a given duration. Valid Values: 1 | 3 | 31536000 | 94608000 - `"Marker"`: An optional marker returned from a prior request. Use this marker for @@ -2566,24 +2582,26 @@ end Returns information about serverless cache snapshots. By default, this API lists all of the customer’s serverless cache snapshots. It can also describe a single serverless cache snapshot, or the snapshots associated with a particular serverless cache. Available for -Redis only. +Redis OSS and Serverless Memcached only. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are:
- `"MaxResults"`: The maximum number of records to include in the response. If more records
  exist than the specified max-results value, a market is included in the response so that
-  remaining results can be retrieved. Available for Redis only.The default is 50. The
-  Validation Constraints are a maximum of 50.
+  remaining results can be retrieved. Available for Redis OSS and Serverless Memcached
+  only. The default is 50. The Validation Constraints are a maximum of 50.
- `"NextToken"`: An optional marker returned from a prior request to support pagination of
  results from this operation. If this parameter is specified, the response includes only
  records beyond the marker, up to the value specified by max-results. Available for Redis
-  only.
+  OSS and Serverless Memcached only.
- `"ServerlessCacheName"`: The identifier of serverless cache. If this parameter is
  specified, only snapshots associated with that specific serverless cache are described.
-  Available for Redis only.
+  Available for Redis OSS and Serverless Memcached only.
- `"ServerlessCacheSnapshotName"`: The identifier of the serverless cache’s snapshot. If
-  this parameter is specified, only this snapshot is described. Available for Redis only.
-- `"SnapshotType"`: The type of snapshot that is being described. Available for Redis only.
+  this parameter is specified, only this snapshot is described. Available for Redis OSS and
+  Serverless Memcached only.
+- `"SnapshotType"`: The type of snapshot that is being described. Available for Redis OSS
+  and Serverless Memcached only.
"""
function describe_serverless_cache_snapshots(;
    aws_config::AbstractAWSConfig=global_aws_config()
@@ -2678,7 +2696,7 @@ end

Returns information about cluster or replication group snapshots. By default,
DescribeSnapshots lists all of your snapshots; it can optionally describe a single
snapshot, or just the snapshots associated with a particular cache cluster. This operation
-is valid for Redis only.
+is valid for Redis OSS only.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -2722,7 +2740,8 @@ Returns details of the update actions

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"CacheClusterIds"`: The cache cluster IDs
-- `"Engine"`: The Elasticache engine to which the update applies. Either Redis or Memcached
+- `"Engine"`: The Elasticache engine to which the update applies. Either Redis OSS or
+  Memcached.
- `"Marker"`: An optional marker returned from a prior request. Use this marker for
  pagination of results from this operation. If this parameter is specified, the response
  includes only records beyond the marker, up to the value specified by MaxRecords.
@@ -2789,7 +2808,7 @@ Returns a list of users.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"Engine"`: The Redis engine.
+- `"Engine"`: The Redis OSS engine.
- `"Filters"`: Filter to determine the list of User IDs to return.
- `"Marker"`: An optional marker returned from a prior request. Use this marker for
  pagination of results from this operation. If this parameter is specified, the response
@@ -2875,13 +2894,13 @@ end
    export_serverless_cache_snapshot(s3_bucket_name, serverless_cache_snapshot_name, params::Dict{String,<:Any})

Provides the functionality to export the serverless cache snapshot data to Amazon S3.
-Available for Redis only.
+Available for Redis OSS only. 
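For illustration only (not part of the generated diff): driving the export call above through AWS.jl's `@service` layer. The bucket and snapshot names are placeholders, and the bucket must live in the snapshot's region, as the argument docs below note:

using AWS: @service
@service ElastiCache

# Export a serverless cache snapshot to an S3 bucket in the same region.
ElastiCache.export_serverless_cache_snapshot("my-bucket", "my-snapshot")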
# Arguments - `s3_bucket_name`: Name of the Amazon S3 bucket to export the snapshot to. The Amazon S3 - bucket must also be in same region as the snapshot. Available for Redis only. + bucket must also be in same region as the snapshot. Available for Redis OSS only. - `serverless_cache_snapshot_name`: The identifier of the serverless cache snapshot to be - exported to S3. Available for Redis only. + exported to S3. Available for Redis OSS only. """ function export_serverless_cache_snapshot( @@ -3040,10 +3059,10 @@ end increase_replica_count(apply_immediately, replication_group_id) increase_replica_count(apply_immediately, replication_group_id, params::Dict{String,<:Any}) -Dynamically increases the number of replicas in a Redis (cluster mode disabled) replication -group or the number of replica nodes in one or more node groups (shards) of a Redis -(cluster mode enabled) replication group. This operation is performed with no cluster down -time. +Dynamically increases the number of replicas in a Redis OSS (cluster mode disabled) +replication group or the number of replica nodes in one or more node groups (shards) of a +Redis OSS (cluster mode enabled) replication group. This operation is performed with no +cluster down time. # Arguments - `apply_immediately`: If True, the number of replica nodes is increased immediately. @@ -3054,11 +3073,11 @@ time. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"NewReplicaCount"`: The number of read replica nodes you want at the completion of this - operation. For Redis (cluster mode disabled) replication groups, this is the number of - replica nodes in the replication group. For Redis (cluster mode enabled) replication + operation. For Redis OSS (cluster mode disabled) replication groups, this is the number of + replica nodes in the replication group. For Redis OSS (cluster mode enabled) replication groups, this is the number of replica nodes in each of the replication group's node groups. - `"ReplicaConfiguration"`: A list of ConfigureShard objects that can be used to configure - each shard in a Redis (cluster mode enabled) replication group. The ConfigureShard has + each shard in a Redis OSS (cluster mode enabled) replication group. The ConfigureShard has three members: NewReplicaCount, NodeGroupId, and PreferredAvailabilityZones. """ function increase_replica_count( @@ -3101,7 +3120,7 @@ end list_allowed_node_type_modifications() list_allowed_node_type_modifications(params::Dict{String,<:Any}) -Lists all available node types that you can scale your Redis cluster's or replication +Lists all available node types that you can scale your Redis OSS cluster's or replication group's current node type. When you use the ModifyCacheCluster or ModifyReplicationGroup operations to scale your cluster or replication group, the value of the CacheNodeType parameter must be one of the node types returned by this operation. @@ -3216,9 +3235,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameter must be specified with the auth-token parameter. 
Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users - with Redis AUTH -- `"AutoMinorVersionUpgrade"`:  If you are running Redis engine version 6.0 or later, set - this parameter to yes if you want to opt-in to the next auto minor version upgrade + with Redis OSS AUTH +- `"AutoMinorVersionUpgrade"`:  If you are running Redis OSS engine version 6.0 or later, + set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. - `"CacheNodeIdsToRemove"`: A list of cache node IDs to be removed. A node ID is a numeric identifier (0001, 0002, etc.). This parameter is only valid when NumCacheNodes is less than @@ -3243,8 +3262,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys earlier engine version, you must delete the existing cluster and create it anew with the earlier engine version. - `"IpDiscovery"`: The network type you choose when modifying a cluster, either ipv4 | - ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached - engine version 1.6.6 on all instances built on the Nitro system. + ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or + Memcached engine version 1.6.6 on all instances built on the Nitro system. - `"LogDeliveryConfigurations"`: Specifies the destination, format and type of the logs. - `"NewAvailabilityZones"`: This option is only supported on Memcached clusters. The list of Availability Zones where the new Memcached cache nodes are created. This parameter is @@ -3282,8 +3301,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys value is less than the number of existing cache nodes, nodes are removed. If the value is equal to the number of current cache nodes, any pending add or remove requests are canceled. If you are removing cache nodes, you must use the CacheNodeIdsToRemove parameter - to provide the IDs of the specific cache nodes to remove. For clusters running Redis, this - value must be 1. For clusters running Memcached, this value must be between 1 and 40. + to provide the IDs of the specific cache nodes to remove. For clusters running Redis OSS, + this value must be 1. For clusters running Memcached, this value must be between 1 and 40. Adding or removing Memcached cache nodes can be applied immediately or as a pending operation (see ApplyImmediately). A pending operation to modify the number of cache nodes in a cluster during its maintenance window, whether by adding or removing nodes in @@ -3507,10 +3526,10 @@ end modify_replication_group(replication_group_id) modify_replication_group(replication_group_id, params::Dict{String,<:Any}) -Modifies the settings for a replication group. This is limited to Redis 7 and newer. -Scaling for Amazon ElastiCache for Redis (cluster mode enabled) in the ElastiCache User +Modifies the settings for a replication group. This is limited to Redis OSS 7 and newer. +Scaling for Amazon ElastiCache (Redis OSS) (cluster mode enabled) in the ElastiCache User Guide ModifyReplicationGroupShardConfiguration in the ElastiCache API Reference This -operation is valid for Redis only. +operation is valid for Redis OSS only. # Arguments - `replication_group_id`: The identifier of the replication group to modify. 
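As a hedged sketch (placeholder identifiers, not part of the generated diff) of the two-step online TLS migration that the TransitEncryptionMode notes further down describe:

using AWS: @service
@service ElastiCache

group = "my-replication-group"  # placeholder

# Step 1: allow encrypted and unencrypted clients side by side.
ElastiCache.modify_replication_group(
    group,
    Dict(
        "TransitEncryptionEnabled" => true,
        "TransitEncryptionMode" => "preferred",
        "ApplyImmediately" => true,
    ),
)

# Step 2, once all Redis OSS clients connect over TLS: require encryption.
ElastiCache.modify_replication_group(
    group,
    Dict("TransitEncryptionMode" => "required", "ApplyImmediately" => true),
)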
@@ -3532,9 +3551,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameter must be specified with the auth-token parameter. Possible values: ROTATE - default, if no update strategy is provided SET - allowed only after ROTATE DELETE - allowed only when transitioning to RBAC For more information, see Authenticating Users - with Redis AUTH -- `"AutoMinorVersionUpgrade"`:  If you are running Redis engine version 6.0 or later, set - this parameter to yes if you want to opt-in to the next auto minor version upgrade + with Redis OSS AUTH +- `"AutoMinorVersionUpgrade"`:  If you are running Redis OSS engine version 6.0 or later, + set this parameter to yes if you want to opt-in to the next auto minor version upgrade campaign. This parameter is disabled for previous versions. - `"AutomaticFailoverEnabled"`: Determines whether a read replica is automatically promoted to read/write primary if the existing primary encounters a failure. Valid values: true | @@ -3551,18 +3570,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys running outside of an Amazon Virtual Private Cloud (Amazon VPC). Constraints: Must contain no more than 255 alphanumeric characters. Must not be Default. - `"ClusterMode"`: Enabled or Disabled. To modify cluster mode from Disabled to Enabled, - you must first set the cluster mode to Compatible. Compatible mode allows your Redis + you must first set the cluster mode to Compatible. Compatible mode allows your Redis OSS clients to connect using both cluster mode enabled and cluster mode disabled. After you - migrate all Redis clients to use cluster mode enabled, you can then complete cluster mode - configuration and set the cluster mode to Enabled. + migrate all Redis OSS clients to use cluster mode enabled, you can then complete cluster + mode configuration and set the cluster mode to Enabled. - `"EngineVersion"`: The upgraded version of the cache engine to be run on the clusters in the replication group. Important: You can upgrade to a newer engine version (see Selecting a Cache Engine and Version), but you cannot downgrade to an earlier engine version. If you want to use an earlier engine version, you must delete the existing replication group and create it anew with the earlier engine version. - `"IpDiscovery"`: The network type you choose when modifying a cluster, either ipv4 | - ipv6. IPv6 is supported for workloads using Redis engine version 6.2 onward or Memcached - engine version 1.6.6 on all instances built on the Nitro system. + ipv6. IPv6 is supported for workloads using Redis OSS engine version 6.2 onward or + Memcached engine version 1.6.6 on all instances built on the Nitro system. - `"LogDeliveryConfigurations"`: Specifies the destination, format and type of the logs. - `"MultiAZEnabled"`: A flag to indicate MultiAZ is enabled. - `"NodeGroupId"`: Deprecated. This parameter is not used. @@ -3597,7 +3616,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys 05:00-09:00 If you do not specify this parameter, ElastiCache automatically chooses an appropriate time range. - `"SnapshottingClusterId"`: The cluster ID that is used as the daily snapshot source for - the replication group. This parameter cannot be set for Redis (cluster mode enabled) + the replication group. This parameter cannot be set for Redis OSS (cluster mode enabled) replication groups. - `"TransitEncryptionEnabled"`: A flag that enables in-transit encryption when set to true. 
If you are enabling in-transit encryption for an existing cluster, you must also set @@ -3606,10 +3625,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys in-transit encryption, with no downtime. You must set TransitEncryptionEnabled to true, for your existing cluster, and set TransitEncryptionMode to preferred in the same request to allow both encrypted and unencrypted connections at the same time. Once you migrate all - your Redis clients to use encrypted connections you can set the value to required to allow - encrypted connections only. Setting TransitEncryptionMode to required is a two-step process - that requires you to first set the TransitEncryptionMode to preferred, after that you can - set TransitEncryptionMode to required. + your Redis OSS clients to use encrypted connections you can set the value to required to + allow encrypted connections only. Setting TransitEncryptionMode to required is a two-step + process that requires you to first set the TransitEncryptionMode to preferred, after that + you can set TransitEncryptionMode to required. - `"UserGroupIdsToAdd"`: The ID of the user group you are associating with the replication group. - `"UserGroupIdsToRemove"`: The ID of the user group to disassociate from the replication @@ -3654,19 +3673,19 @@ shards, or rebalance the keyspaces among existing shards. At present, the only permitted value for this parameter is true. Value: true - `node_group_count`: The number of node groups (shards) that results from the modification of the shard configuration. -- `replication_group_id`: The name of the Redis (cluster mode enabled) cluster (replication - group) on which the shards are to be configured. +- `replication_group_id`: The name of the Redis OSS (cluster mode enabled) cluster + (replication group) on which the shards are to be configured. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"NodeGroupsToRemove"`: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. - NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache for - Redis will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. + NodeGroupsToRemove is a list of NodeGroupIds to remove from the cluster. ElastiCache (Redis + OSS) will attempt to remove all node groups listed by NodeGroupsToRemove from the cluster. - `"NodeGroupsToRetain"`: If the value of NodeGroupCount is less than the current number of node groups (shards), then either NodeGroupsToRemove or NodeGroupsToRetain is required. - NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache for - Redis will attempt to remove all node groups except those listed by NodeGroupsToRetain from + NodeGroupsToRetain is a list of NodeGroupIds to retain in the cluster. ElastiCache (Redis + OSS) will attempt to remove all node groups except those listed by NodeGroupsToRetain from the cluster. - `"ReshardingConfiguration"`: Specifies the preferred availability zones for each node group in the cluster. If the value of NodeGroupCount is greater than the current number of @@ -3730,23 +3749,25 @@ This API modifies the attributes of a serverless cache. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CacheUsageLimits"`: Modify the cache usage limit for the serverless cache. 
- `"DailySnapshotTime"`: The daily time during which Elasticache begins taking a daily - snapshot of the serverless cache. Available for Redis only. The default is NULL, i.e. the - existing snapshot time configured for the cluster is not removed. + snapshot of the serverless cache. Available for Redis OSS and Serverless Memcached only. + The default is NULL, i.e. the existing snapshot time configured for the cluster is not + removed. - `"Description"`: User provided description for the serverless cache. Default = NULL, i.e. the existing description is not removed/modified. The description has a maximum length of 255 characters. - `"RemoveUserGroup"`: The identifier of the UserGroup to be removed from association with - the Redis serverless cache. Available for Redis only. Default is NULL. + the Redis OSS serverless cache. Available for Redis OSS only. Default is NULL. - `"SecurityGroupIds"`: The new list of VPC security groups to be associated with the serverless cache. Populating this list means the current VPC security groups will be removed. This security group is used to authorize traffic access for the VPC end-point (private-link). Default = NULL - the existing list of VPC security groups is not removed. - `"SnapshotRetentionLimit"`: The number of days for which Elasticache retains automatic - snapshots before deleting them. Available for Redis only. Default = NULL, i.e. the existing - snapshot-retention-limit will not be removed or modified. The maximum value allowed is 35 - days. + snapshots before deleting them. Available for Redis OSS and Serverless Memcached only. + Default = NULL, i.e. the existing snapshot-retention-limit will not be removed or modified. + The maximum value allowed is 35 days. - `"UserGroupId"`: The identifier of the UserGroup to be associated with the serverless - cache. Available for Redis only. Default is NULL - the existing UserGroup is not removed. + cache. Available for Redis OSS only. Default is NULL - the existing UserGroup is not + removed. """ function modify_serverless_cache( ServerlessCacheName; aws_config::AbstractAWSConfig=global_aws_config() @@ -3856,7 +3877,7 @@ end Allows you to purchase a reserved cache node offering. Reserved nodes are not eligible for cancellation and are non-refundable. For more information, see Managing Costs with Reserved -Nodes for Redis or Managing Costs with Reserved Nodes for Memcached. +Nodes for Redis OSS or Managing Costs with Reserved Nodes for Memcached. # Arguments - `reserved_cache_nodes_offering_id`: The ID of the reserved cache node offering to @@ -3961,9 +3982,9 @@ applies any modified cache parameter groups to the cluster. The reboot operation place as soon as possible, and results in a momentary outage to the cluster. During the reboot, the cluster status is set to REBOOTING. The reboot causes the contents of the cache (for each cache node being rebooted) to be lost. When the reboot is complete, a cluster -event is created. Rebooting a cluster is currently supported on Memcached and Redis -(cluster mode disabled) clusters. Rebooting is not supported on Redis (cluster mode -enabled) clusters. If you make changes to parameters that require a Redis (cluster mode +event is created. Rebooting a cluster is currently supported on Memcached and Redis OSS +(cluster mode disabled) clusters. Rebooting is not supported on Redis OSS (cluster mode +enabled) clusters. 
If you make changes to parameters that require a Redis OSS (cluster mode enabled) cluster reboot for the changes to be applied, see Rebooting a Cluster for an alternate process. @@ -4176,7 +4197,7 @@ Start the migration of data. # Arguments - `customer_node_endpoint_list`: List of endpoints from which data should be migrated. For - Redis (cluster mode disabled), list should have only one element. + Redis OSS (cluster mode disabled), list should have only one element. - `replication_group_id`: The ID of the replication group to which data should be migrated. """ @@ -4232,7 +4253,7 @@ following A customer can use this operation to test automatic failover on up shards (called node groups in the ElastiCache API and Amazon CLI) in any rolling 24-hour period. If calling this operation on shards in different clusters (called replication groups in the API and CLI), the calls can be made concurrently. If calling this -operation multiple times on different shards in the same Redis (cluster mode enabled) +operation multiple times on different shards in the same Redis OSS (cluster mode enabled) replication group, the first node replacement must complete before a subsequent call can be made. To determine whether the node replacement is complete you can check Events using the Amazon ElastiCache console, the Amazon CLI, or the ElastiCache API. Look for the diff --git a/src/services/entityresolution.jl b/src/services/entityresolution.jl index 6ad0a84154..fb58e8cb54 100644 --- a/src/services/entityresolution.jl +++ b/src/services/entityresolution.jl @@ -17,7 +17,9 @@ GetPolicy API. - `arn`: The Amazon Resource Name (ARN) of the resource that will be accessed by the principal. - `effect`: Determines whether the permissions specified in the policy are to be allowed - (Allow) or denied (Deny). + (Allow) or denied (Deny). If you set the value of the effect parameter to Deny for the + AddPolicyStatement operation, you must also set the value of the effect parameter in the + policy to Deny for the PutPolicy operation. - `principal`: The Amazon Web Services service or Amazon Web Services account that can access the resource defined as ARN. - `statement_id`: A statement identifier that differentiates the statement from others in @@ -116,20 +118,18 @@ function batch_delete_unique_id( end """ - create_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name) - create_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name, params::Dict{String,<:Any}) + create_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name) + create_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name, params::Dict{String,<:Any}) Creates an IdMappingWorkflow object which stores the configuration of the data processing job to be run. Each IdMappingWorkflow must have a unique workflow name. To modify an existing workflow, use the UpdateIdMappingWorkflow API. # Arguments -- `id_mapping_techniques`: An object which defines the idMappingType and the - providerProperties. +- `id_mapping_techniques`: An object which defines the ID mapping technique and any + additional configurations. - `input_source_config`: A list of InputSource objects, which have the fields InputSourceARN and SchemaName. -- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes - this role to create resources on your behalf as part of workflow execution. - `workflow_name`: The name of the workflow. 
There can't be multiple IdMappingWorkflows with the same name. @@ -138,12 +138,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"description"`: A description of the workflow. - `"outputSourceConfig"`: A list of IdMappingWorkflowOutputSource objects, each of which contains fields OutputS3Path and Output. +- `"roleArn"`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes + this role to create resources on your behalf as part of workflow execution. - `"tags"`: The tags used to organize, track, or control access for this resource. """ function create_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -153,7 +154,6 @@ function create_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, "workflowName" => workflowName, ); aws_config=aws_config, @@ -163,7 +163,6 @@ end function create_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -177,7 +176,6 @@ function create_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, "workflowName" => workflowName, ), params, @@ -1131,7 +1129,9 @@ Updates the resource-based policy. # Arguments - `arn`: The Amazon Resource Name (ARN) of the resource for which the policy needs to be updated. -- `policy`: The resource-based policy. +- `policy`: The resource-based policy. If you set the value of the effect parameter in the + policy to Deny for the PutPolicy operation, you must also set the value of the effect + parameter to Deny for the AddPolicyStatement operation. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1314,20 +1314,18 @@ function untag_resource( end """ - update_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name) - update_id_mapping_workflow(id_mapping_techniques, input_source_config, role_arn, workflow_name, params::Dict{String,<:Any}) + update_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name) + update_id_mapping_workflow(id_mapping_techniques, input_source_config, workflow_name, params::Dict{String,<:Any}) Updates an existing IdMappingWorkflow. This method is identical to CreateIdMappingWorkflow, except it uses an HTTP PUT request instead of a POST request, and the IdMappingWorkflow must already exist for the method to succeed. # Arguments -- `id_mapping_techniques`: An object which defines the idMappingType and the - providerProperties. +- `id_mapping_techniques`: An object which defines the ID mapping technique and any + additional configurations. - `input_source_config`: A list of InputSource objects, which have the fields InputSourceARN and SchemaName. -- `role_arn`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes - this role to access Amazon Web Services resources on your behalf. - `workflow_name`: The name of the workflow. # Optional Parameters @@ -1335,11 +1333,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"description"`: A description of the workflow. - `"outputSourceConfig"`: A list of OutputSource objects, each of which contains fields OutputS3Path and KMSArn. 
+- `"roleArn"`: The Amazon Resource Name (ARN) of the IAM role. Entity Resolution assumes + this role to access Amazon Web Services resources on your behalf. """ function update_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName; aws_config::AbstractAWSConfig=global_aws_config(), ) @@ -1349,7 +1348,6 @@ function update_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, @@ -1358,7 +1356,6 @@ end function update_id_mapping_workflow( idMappingTechniques, inputSourceConfig, - roleArn, workflowName, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -1372,7 +1369,6 @@ function update_id_mapping_workflow( Dict{String,Any}( "idMappingTechniques" => idMappingTechniques, "inputSourceConfig" => inputSourceConfig, - "roleArn" => roleArn, ), params, ), diff --git a/src/services/finspace.jl b/src/services/finspace.jl index 996107228b..374dd5e3e1 100644 --- a/src/services/finspace.jl +++ b/src/services/finspace.jl @@ -461,6 +461,9 @@ Creates a new scaling group. the scaling group. - `host_type`: The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. You can add one of the following values: + kx.sg.large – The host type with a configuration of 16 GiB memory and 2 vCPUs. + kx.sg.xlarge – The host type with a configuration of 32 GiB memory and 4 vCPUs. + kx.sg.2xlarge – The host type with a configuration of 64 GiB memory and 8 vCPUs. kx.sg.4xlarge – The host type with a configuration of 108 GiB memory and 16 vCPUs. kx.sg.8xlarge – The host type with a configuration of 216 GiB memory and 32 vCPUs. kx.sg.16xlarge – The host type with a configuration of 432 GiB memory and 64 vCPUs. diff --git a/src/services/firehose.jl b/src/services/firehose.jl index d50f06233d..81040a0147 100644 --- a/src/services/firehose.jl +++ b/src/services/firehose.jl @@ -77,6 +77,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys only one destination. - `"HttpEndpointDestinationConfiguration"`: Enables configuring Kinesis Firehose to deliver data to any HTTP endpoint destination. You can specify only one destination. +- `"IcebergDestinationConfiguration"`: Configure Apache Iceberg Tables destination. + Amazon Data Firehose is in preview release and is subject to change. - `"KinesisStreamSourceConfiguration"`: When a Kinesis data stream is used as the source for the delivery stream, a KinesisStreamSourceConfiguration containing the Kinesis data stream Amazon Resource Name (ARN) and the role ARN for the source stream. @@ -744,6 +746,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"ExtendedS3DestinationUpdate"`: Describes an update for a destination in Amazon S3. - `"HttpEndpointDestinationUpdate"`: Describes an update to the specified HTTP endpoint destination. +- `"IcebergDestinationUpdate"`: Describes an update for a destination in Apache Iceberg + Tables. Amazon Data Firehose is in preview release and is subject to change. - `"RedshiftDestinationUpdate"`: Describes an update for a destination in Amazon Redshift. - `"S3DestinationUpdate"`: [Deprecated] Describes an update for a destination in Amazon S3. 
- `"SnowflakeDestinationUpdate"`: Update to the Snowflake destination configuration diff --git a/src/services/fis.jl b/src/services/fis.jl index 52a6f21e24..e902fb08d5 100644 --- a/src/services/fis.jl +++ b/src/services/fis.jl @@ -325,6 +325,33 @@ function get_experiment_template( ) end +""" + get_safety_lever(id) + get_safety_lever(id, params::Dict{String,<:Any}) + + Gets information about the specified safety lever. + +# Arguments +- `id`: The ID of the safety lever. + +""" +function get_safety_lever(id; aws_config::AbstractAWSConfig=global_aws_config()) + return fis( + "GET", "/safetyLevers/$(id)"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_safety_lever( + id, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "GET", + "/safetyLevers/$(id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_target_account_configuration(account_id, id) get_target_account_configuration(account_id, id, params::Dict{String,<:Any}) @@ -850,6 +877,43 @@ function update_experiment_template( ) end +""" + update_safety_lever_state(id, state) + update_safety_lever_state(id, state, params::Dict{String,<:Any}) + + Updates the specified safety lever state. + +# Arguments +- `id`: The ID of the safety lever. +- `state`: The state of the safety lever. + +""" +function update_safety_lever_state( + id, state; aws_config::AbstractAWSConfig=global_aws_config() +) + return fis( + "PATCH", + "/safetyLevers/$(id)/state", + Dict{String,Any}("state" => state); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_safety_lever_state( + id, + state, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return fis( + "PATCH", + "/safetyLevers/$(id)/state", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("state" => state), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_target_account_configuration(account_id, id) update_target_account_configuration(account_id, id, params::Dict{String,<:Any}) diff --git a/src/services/gamelift.jl b/src/services/gamelift.jl index cfb70d02b4..3c19b0896f 100644 --- a/src/services/gamelift.jl +++ b/src/services/gamelift.jl @@ -236,10 +236,11 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys value determines the type of fleet resources that you use for this build. If your game build contains multiple executables, they all must run on the same operating system. You must specify a valid operating system in this request. There is no default value. You can't - change a build's operating system later. If you have active fleets using the Windows - Server 2012 operating system, you can continue to create new builds using this OS until - October 10, 2023, when Microsoft ends its support. All others must use Windows Server 2016 - when creating new Windows-based builds. + change a build's operating system later. Amazon Linux 2 (AL2) will reach end of support on + 6/30/2025. See more details in the Amazon Linux 2 FAQs. For game servers that are hosted on + AL2 and use Amazon GameLift server SDK 4.x., first update the game server build to server + SDK 5.x, and then deploy to AL2023 instances. See Migrate to Amazon GameLift server SDK + version 5. - `"ServerSdkVersion"`: A server SDK version you used when integrating your game server build with Amazon GameLift. For more information see Integrate games with custom game servers. 
By default Amazon GameLift sets this value to 4.0.2. @@ -307,7 +308,11 @@ design guide Create a container definition as a JSON file - `name`: A descriptive identifier for the container group definition. The name value must be unique in an Amazon Web Services Region. - `operating_system`: The platform that is used by containers in the container group - definition. All containers in a group must run on the same operating system. + definition. All containers in a group must run on the same operating system. Amazon Linux + 2 (AL2) will reach end of support on 6/30/2025. See more details in the Amazon Linux 2 + FAQs. For game servers that are hosted on AL2 and use Amazon GameLift server SDK 4.x., + first update the game server build to server SDK 5.x, and then deploy to AL2023 instances. + See Migrate to Amazon GameLift server SDK version 5. - `total_cpu_limit`: The maximum amount of CPU units to allocate to the container group. Set this parameter to an integer value in CPU units (1 vCPU is equal to 1024 CPU units). All containers in the group share this memory. If you specify CPU limits for individual @@ -489,12 +494,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys integrated with server SDK version 5.x. For more information about using shared credentials, see Communicate with other Amazon Web Services resources from your fleets. - `"Locations"`: A set of remote locations to deploy additional instances to and manage as - part of the fleet. This parameter can only be used when creating fleets in Amazon Web - Services Regions that support multiple locations. You can add any Amazon GameLift-supported - Amazon Web Services Region as a remote location, in the form of an Amazon Web Services - Region code, such as us-west-2 or Local Zone code. To create a fleet with instances in the - home Region only, don't set this parameter. When using this parameter, Amazon GameLift - requires you to include your home location in the request. + a multi-location fleet. Use this parameter when creating a fleet in Amazon Web Services + Regions that support multiple locations. You can add any Amazon Web Services Region or + Local Zone that's supported by Amazon GameLift. Provide a list of one or more Amazon Web + Services Region codes, such as us-west-2, or Local Zone names. When using this parameter, + Amazon GameLift requires you to include your home location in the request. For a list of + supported Regions and Local Zones, see Amazon GameLift service locations for managed + hosting. - `"LogPaths"`: This parameter is no longer used. To specify where Amazon GameLift should store log files once a server process shuts down, use the Amazon GameLift server API ProcessReady() and specify one or more directory paths in logParameters. For more @@ -574,7 +580,7 @@ one or more locations. If successful, this operation returns the list of added with their status set to NEW. Amazon GameLift initiates the process of starting an instance in each added location. You can track the status of each new location by monitoring location creation events using DescribeFleetEvents. Learn more Setting up fleets -Multi-location fleets +Update fleet locations Amazon GameLift service locations for managed hosting. # Arguments - `fleet_id`: A unique identifier for the fleet to add locations to. You can use either the @@ -962,10 +968,10 @@ Creates a custom location for use in an Anywhere fleet. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are:
-- `"Tags"`: A list of labels to assign to the new matchmaking configuration resource. Tags
-  are developer-defined key-value pairs. Tagging Amazon Web Services resources are useful for
-  resource management, access management and cost allocation. For more information, see
-  Tagging Amazon Web Services Resources in the Amazon Web Services General Rareference.
+- `"Tags"`: A list of labels to assign to the new resource. Tags are developer-defined
+  key-value pairs. Tagging Amazon Web Services resources is useful for resource management,
+  access management, and cost allocation. For more information, see Tagging Amazon Web
+  Services Resources in the Amazon Web Services General Reference.
"""
function create_location(LocationName; aws_config::AbstractAWSConfig=global_aws_config())
    return gamelift(
@@ -2539,6 +2545,7 @@ set of sequential pages. If successful, a LocationAttributes object is returned
requested location. If the fleet does not have a requested location, no information is
returned. This operation does not return the home Region. To get information on a fleet's
home Region, call DescribeFleetAttributes. Learn more Setting up Amazon GameLift fleets
+ Amazon GameLift service locations for managed hosting

# Arguments
- `fleet_id`: A unique identifier for the fleet to retrieve remote locations for. You can
@@ -2586,7 +2593,8 @@ container groups. Use this operation to retrieve capacity information for a flee
location or home Region (you can also retrieve home Region capacity by calling
DescribeFleetCapacity). To retrieve capacity data, identify a fleet and location. If
successful, a FleetCapacity object is returned for the requested fleet location. Learn
-more Setting up Amazon GameLift fleets GameLift metrics for fleets
+more Setting up Amazon GameLift fleets Amazon GameLift service locations for managed
+hosting GameLift metrics for fleets

# Arguments
- `fleet_id`: A unique identifier for the fleet to request location capacity for. You can
@@ -2634,8 +2642,8 @@ current game hosting activity at the requested location. Use this operation to r
utilization information for a fleet's remote location or home Region (you can also retrieve
home Region utilization by calling DescribeFleetUtilization). To retrieve utilization data,
identify a fleet and location. If successful, a FleetUtilization object is returned for
-the requested fleet location. Learn more Setting up Amazon GameLift fleets GameLift
-metrics for fleets
+the requested fleet location. Learn more Setting up Amazon GameLift fleets Amazon
+GameLift service locations for managed hosting GameLift metrics for fleets

# Arguments
- `fleet_id`: A unique identifier for the fleet to request location utilization for. You
@@ -3562,7 +3570,7 @@ EC2 Systems Manager User Guide. Container fleets With a container fleet (where
type is CONTAINER), use these credentials and the target value with SSM to connect to the
fleet instance where the container is running. After you're connected to the instance, use
Docker commands to interact with the container. Learn more Remotely connect to fleet
-instances Debug fleet issues Remotely connect to a container fleet
+instances Debug fleet issues

# Arguments
- `compute_name`: A unique identifier for the compute resource that you want to connect to.
@@ -4307,9 +4315,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"DnsName"`: The DNS name of the compute resource. Amazon GameLift requires either a DNS
  name or IP address. 
- `"IpAddress"`: The IP address of the compute resource. Amazon GameLift requires either a - DNS name or IP address. + DNS name or IP address. When registering an Anywhere fleet, an IP address is required. - `"Location"`: The name of a custom location to associate with the compute resource being - registered. + registered. This parameter is required when registering a compute for an Anywhere fleet. """ function register_compute( ComputeName, FleetId; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/glue.jl b/src/services/glue.jl index 381075a80b..31a8d1056b 100644 --- a/src/services/glue.jl +++ b/src/services/glue.jl @@ -657,6 +657,48 @@ function batch_get_workflows( ) end +""" + batch_put_data_quality_statistic_annotation(inclusion_annotations) + batch_put_data_quality_statistic_annotation(inclusion_annotations, params::Dict{String,<:Any}) + +Annotate datapoints over time for a specific data quality statistic. + +# Arguments +- `inclusion_annotations`: A list of DatapointInclusionAnnotation's. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ClientToken"`: Client Token. +""" +function batch_put_data_quality_statistic_annotation( + InclusionAnnotations; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "BatchPutDataQualityStatisticAnnotation", + Dict{String,Any}("InclusionAnnotations" => InclusionAnnotations); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_put_data_quality_statistic_annotation( + InclusionAnnotations, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return glue( + "BatchPutDataQualityStatisticAnnotation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("InclusionAnnotations" => InclusionAnnotations), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ batch_stop_job_run(job_name, job_run_ids) batch_stop_job_run(job_name, job_run_ids, params::Dict{String,<:Any}) @@ -1200,6 +1242,8 @@ see the Glue developer guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"ClientToken"`: Used for idempotency and is recommended to be set to a random ID (such as a UUID) to avoid creating or starting multiple instances of the same resource. +- `"DataQualitySecurityConfiguration"`: The name of the security configuration created with + the data quality encryption option. - `"Description"`: A description of the data quality ruleset. - `"Tags"`: A list of tags applied to the data quality ruleset. - `"TargetTable"`: A target table associated with the data quality ruleset. @@ -1414,6 +1458,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys using the Glue Studio visual editor. NOTEBOOK - The job was created using an interactive sessions notebook. When the JobMode field is missing or null, SCRIPT is assigned as the default value. +- `"JobRunQueuingEnabled"`: Specifies whether job run queuing is enabled for the job runs + for this job. A value of true means job run queuing is enabled for the job runs. If false + or not populated, the job runs will not be considered for queueing. If this field does not + match the value set in the job run, then the value from the job run field will be used. - `"LogUri"`: This field is reserved for future use. - `"MaintenanceWindow"`: This field specifies a day of the week and hour for a maintenance window for streaming jobs. 
Glue periodically performs maintenance activities. During these
@@ -4026,6 +4074,86 @@ function get_data_catalog_encryption_settings(
    )
end

+"""
+    get_data_quality_model(profile_id)
+    get_data_quality_model(profile_id, params::Dict{String,<:Any})
+
+Retrieve the training status of the model along with more information (CompletedOn,
+StartedOn, FailureReason).
+
+# Arguments
+- `profile_id`: The Profile ID.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"StatisticId"`: The Statistic ID.
+"""
+function get_data_quality_model(
+    ProfileId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return glue(
+        "GetDataQualityModel",
+        Dict{String,Any}("ProfileId" => ProfileId);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_data_quality_model(
+    ProfileId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return glue(
+        "GetDataQualityModel",
+        Dict{String,Any}(
+            mergewith(_merge, Dict{String,Any}("ProfileId" => ProfileId), params)
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_data_quality_model_result(profile_id, statistic_id)
+    get_data_quality_model_result(profile_id, statistic_id, params::Dict{String,<:Any})
+
+Retrieve a statistic's predictions for a given Profile ID.
+
+# Arguments
+- `profile_id`: The Profile ID.
+- `statistic_id`: The Statistic ID.
+
+"""
+function get_data_quality_model_result(
+    ProfileId, StatisticId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return glue(
+        "GetDataQualityModelResult",
+        Dict{String,Any}("ProfileId" => ProfileId, "StatisticId" => StatisticId);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_data_quality_model_result(
+    ProfileId,
+    StatisticId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return glue(
+        "GetDataQualityModelResult",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}("ProfileId" => ProfileId, "StatisticId" => StatisticId),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     get_data_quality_result(result_id)
     get_data_quality_result(result_id, params::Dict{String,<:Any})
@@ -4194,6 +4322,8 @@ Retrieves all databases defined in a given Data Catalog.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"AttributesToGet"`: Specifies the database fields returned by the GetDatabases call.
+ This parameter doesn’t accept an empty list. The request must include the NAME.
- `"CatalogId"`: The ID of the Data Catalog from which to retrieve Databases. If none is
 provided, the Amazon Web Services account ID is used by default.
- `"MaxResults"`: The maximum number of databases to return in one response.
@@ -5283,6 +5413,8 @@ Retrieves the Table definition in a Data Catalog for a specified table.
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"CatalogId"`: The ID of the Data Catalog where the table resides. If none is provided,
 the Amazon Web Services account ID is used by default.
+- `"IncludeStatusDetails"`: Specifies whether to include status details related to a
+ request to create or update a Glue Data Catalog view.
- `"QueryAsOfTime"`: The time as of when to read the table contents. If not set, the most
 recent transaction commit time will be used. Cannot be specified along with TransactionId.
- `"TransactionId"`: The transaction ID at which to read the table contents. @@ -5484,10 +5616,16 @@ Retrieves the definitions of some or all of the tables in a given Database. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AttributesToGet"`: Specifies the table fields returned by the GetTables call. This + parameter doesn’t accept an empty list. The request must include NAME. The following are + the valid combinations of values: NAME - Names of all tables in the database. NAME, + TABLE_TYPE - Names of all tables and the table types. - `"CatalogId"`: The ID of the Data Catalog where the tables reside. If none is provided, the Amazon Web Services account ID is used by default. - `"Expression"`: A regular expression pattern. If present, only those tables whose names match the pattern are returned. +- `"IncludeStatusDetails"`: Specifies whether to include status details related to a + request to create or update an Glue Data Catalog view. - `"MaxResults"`: The maximum number of tables to return in a single response. - `"NextToken"`: A continuation token, included if this is a continuation call. - `"QueryAsOfTime"`: The time as of when to read the table contents. If not set, the most @@ -6424,6 +6562,70 @@ function list_data_quality_rulesets( ) end +""" + list_data_quality_statistic_annotations() + list_data_quality_statistic_annotations(params::Dict{String,<:Any}) + +Retrieve annotations for a data quality statistic. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return in this request. +- `"NextToken"`: A pagination token to retrieve the next set of results. +- `"ProfileId"`: The Profile ID. +- `"StatisticId"`: The Statistic ID. +- `"TimestampFilter"`: A timestamp filter. +""" +function list_data_quality_statistic_annotations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "ListDataQualityStatisticAnnotations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_data_quality_statistic_annotations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return glue( + "ListDataQualityStatisticAnnotations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_data_quality_statistics() + list_data_quality_statistics(params::Dict{String,<:Any}) + +Retrieves a list of data quality statistics. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum number of results to return in this request. +- `"NextToken"`: A pagination token to request the next page of results. +- `"ProfileId"`: The Profile ID. +- `"StatisticId"`: The Statistic ID. +- `"TimestampFilter"`: A timestamp filter. 
+
"""
+function list_data_quality_statistics(; aws_config::AbstractAWSConfig=global_aws_config())
+    return glue(
+        "ListDataQualityStatistics"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
+    )
+end
+function list_data_quality_statistics(
+    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return glue(
+        "ListDataQualityStatistics",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     list_dev_endpoints()
     list_dev_endpoints(params::Dict{String,<:Any})
@@ -6845,6 +7047,51 @@ function put_data_catalog_encryption_settings(
    )
end

+"""
+    put_data_quality_profile_annotation(inclusion_annotation, profile_id)
+    put_data_quality_profile_annotation(inclusion_annotation, profile_id, params::Dict{String,<:Any})
+
+Annotate all datapoints for a Profile.
+
+# Arguments
+- `inclusion_annotation`: The inclusion annotation value to apply to the profile.
+- `profile_id`: The ID of the data quality monitoring profile to annotate.
+
+"""
+function put_data_quality_profile_annotation(
+    InclusionAnnotation, ProfileId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return glue(
+        "PutDataQualityProfileAnnotation",
+        Dict{String,Any}(
+            "InclusionAnnotation" => InclusionAnnotation, "ProfileId" => ProfileId
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function put_data_quality_profile_annotation(
+    InclusionAnnotation,
+    ProfileId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return glue(
+        "PutDataQualityProfileAnnotation",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "InclusionAnnotation" => InclusionAnnotation, "ProfileId" => ProfileId
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     put_resource_policy(policy_in_json)
     put_resource_policy(policy_in_json, params::Dict{String,<:Any})
@@ -7264,6 +7511,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 exact-match compared with the Value member of PropertyPredicate. For example, if Key=Name
 and Value=link, tables named customer-link and xx-link-yy are returned, but xxlinkyy is not
 returned.
+- `"IncludeStatusDetails"`: Specifies whether to include status details related to a
+ request to create or update a Glue Data Catalog view.
- `"MaxResults"`: The maximum number of tables to return in a single response.
- `"NextToken"`: A continuation token, included if this is a continuation call.
- `"ResourceShareType"`: Allows you to specify that you want to search the tables shared
@@ -7471,6 +7720,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"ClientToken"`: Used for idempotency and is recommended to be set to a random ID (such
 as a UUID) to avoid creating or starting multiple instances of the same resource.
- `"CreatedRulesetName"`: A name for the ruleset.
+- `"DataQualitySecurityConfiguration"`: The name of the security configuration created with
+ the data quality encryption option.
- `"NumberOfWorkers"`: The number of G.1X workers to be used in the run. The default is 5.
- `"Timeout"`: The timeout for a run in minutes. This is the maximum time that a run can
 consume resources before it is terminated and enters TIMEOUT status. The default is 2,880
@@ -7716,6 +7967,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 with Glue version 3.0 and above and command type glueetl will be allowed to set
 ExecutionClass to FLEX.
The flexible execution class is available for Spark jobs.
- `"JobRunId"`: The ID of a previous JobRun to retry.
+- `"JobRunQueuingEnabled"`: Specifies whether job run queuing is enabled for the job run. A
+ value of true means job run queuing is enabled for the job run. If false or not populated,
+ the job run will not be considered for queueing.
- `"MaxCapacity"`: For Glue version 1.0 or earlier jobs, using the standard worker type,
 the number of Glue data processing units (DPUs) that can be allocated when this job runs. A
 DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity
diff --git a/src/services/iam.jl b/src/services/iam.jl
index e2b287fd1f..e7854c6181 100644
--- a/src/services/iam.jl
+++ b/src/services/iam.jl
@@ -625,14 +625,13 @@ audiences) that identify the application or applications allowed to authenticate
OIDC provider  A list of tags that are attached to the specified IAM OIDC provider  A list
of thumbprints of one or more server certificates that the IdP uses  You get all of this
information from the OIDC IdP you want to use to access Amazon Web Services. Amazon
-Web Services secures communication with some OIDC identity providers (IdPs) through our
-library of trusted root certificate authorities (CAs) instead of using a certificate
-thumbprint to verify your IdP server certificate. In these cases, your legacy thumbprint
-remains in your configuration, but is no longer used for validation. These OIDC IdPs
-include Auth0, GitHub, GitLab, Google, and those that use an Amazon S3 bucket to host a
-JSON Web Key Set (JWKS) endpoint. The trust for the OIDC provider is derived from the IAM
-provider that this operation creates. Therefore, it is best to limit access to the
-CreateOpenIDConnectProvider operation to highly privileged users.
+Web Services secures communication with OIDC identity providers (IdPs) using our library of
+trusted root certificate authorities (CAs) to verify the JSON Web Key Set (JWKS) endpoint's
+TLS certificate. If your OIDC IdP relies on a certificate that is not signed by one of
+these trusted CAs, only then do we secure communication using the thumbprints set in the IdP's
+configuration. The trust for the OIDC provider is derived from the IAM provider that this
+operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider
+operation to highly privileged users.

# Arguments
- `url`: The URL of the identity provider. The URL must begin with https:// and should
@@ -4008,8 +4007,7 @@ end

Lists the account alias associated with the Amazon Web Services account (Note: you can have
only one). For information about using an Amazon Web Services account alias, see Creating,
-deleting, and listing an Amazon Web Services account alias in the Amazon Web Services
-Sign-In User Guide.
+deleting, and listing an Amazon Web Services account alias in the IAM User Guide.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -7864,13 +7862,12 @@ are not merged.) Typically, you need to update a thumbprint only when the identi
certificate changes, which occurs rarely. However, if the provider's certificate does
change, any attempt to assume an IAM role that specifies the OIDC provider as a principal
fails until the certificate thumbprint is updated.
Amazon Web Services secures
-communication with some OIDC identity providers (IdPs) through our library of trusted root
-certificate authorities (CAs) instead of using a certificate thumbprint to verify your IdP
-server certificate. In these cases, your legacy thumbprint remains in your configuration,
-but is no longer used for validation. These OIDC IdPs include Auth0, GitHub, GitLab,
-Google, and those that use an Amazon S3 bucket to host a JSON Web Key Set (JWKS) endpoint.
- Trust for the OIDC provider is derived from the provider certificate and is validated by
-the thumbprint. Therefore, it is best to limit access to the
+communication with OIDC identity providers (IdPs) using our library of trusted root
+certificate authorities (CAs) to verify the JSON Web Key Set (JWKS) endpoint's TLS
+certificate. If your OIDC IdP relies on a certificate that is not signed by one of these
+trusted CAs, only then do we secure communication using the thumbprints set in the IdP's
+configuration.  Trust for the OIDC provider is derived from the provider certificate and
+is validated by the thumbprint. Therefore, it is best to limit access to the
UpdateOpenIDConnectProviderThumbprint operation to highly privileged users.

# Arguments
diff --git a/src/services/internetmonitor.jl b/src/services/internetmonitor.jl
index 41826dae44..26b147dd90 100644
--- a/src/services/internetmonitor.jl
+++ b/src/services/internetmonitor.jl
@@ -526,9 +526,14 @@ Amazon CloudWatch Internet Monitor User Guide.
 and time to first byte (TTFB) information, for the top location and ASN combinations that
 you're monitoring, by traffic volume.    TOP_LOCATION_DETAILS: Provides TTFB for Amazon
 CloudFront, your current configuration, and the best performing EC2 configuration, at 1
-  hour intervals.   For lists of the fields returned with each query type and more
-  information about how each type of query is performed, see Using the Amazon CloudWatch
-  Internet Monitor query interface in the Amazon CloudWatch Internet Monitor User Guide.
+  hour intervals.    OVERALL_TRAFFIC_SUGGESTIONS: Provides TTFB, using a 30-day weighted
+  average, for all traffic in each Amazon Web Services location that is monitored.
+  OVERALL_TRAFFIC_SUGGESTIONS_DETAILS: Provides TTFB, using a 30-day weighted average, for
+  each top location, for a proposed Amazon Web Services location. Must provide an Amazon Web
+  Services location to search.   For lists of the fields returned with each query type and
+  more information about how each type of query is performed, see Using the Amazon
+  CloudWatch Internet Monitor query interface in the Amazon CloudWatch Internet Monitor User
+  Guide.
- `start_time`: The timestamp that is the beginning of the period that you want to
 retrieve data for with your query.
diff --git a/src/services/iotsitewise.jl b/src/services/iotsitewise.jl
index 69d92c5fbf..4012590bdc 100644
--- a/src/services/iotsitewise.jl
+++ b/src/services/iotsitewise.jl
@@ -559,7 +559,7 @@ reusable component that you can include in the composite models of other asset m
can't create assets directly from this type of asset model.

# Arguments
-- `asset_model_name`: A unique, friendly name for the asset model.
+- `asset_model_name`: A unique name for the asset model.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -568,7 +568,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 composite models that model parts of your industrial equipment.
Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. When creating custom composite models, you need to use - CreateAssetModelCompositeModel. For more information, see <LINK>. + CreateAssetModelCompositeModel. For more information, see Creating custom composite models + (Components) in the IoT SiteWise User Guide. - `"assetModelDescription"`: A description for the asset model. - `"assetModelExternalId"`: An external ID to assign to the asset model. The external ID must be unique within your Amazon Web Services account. For more information, see Using @@ -649,13 +650,22 @@ with assetModelType of COMPONENT_MODEL. To create an inline model, specify the assetModelCompositeModelProperties and don't include an composedAssetModelId. # Arguments -- `asset_model_composite_model_name`: A unique, friendly name for the composite model. +- `asset_model_composite_model_name`: A unique name for the composite model. - `asset_model_composite_model_type`: The composite model type. Valid values are AWS/ALARM, CUSTOM, or AWS/L4E_ANOMALY. - `asset_model_id`: The ID of the asset model this composite model is a part of. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"If-Match"`: The expected current entity tag (ETag) for the asset model’s latest or + active version (specified using matchForVersionType). The create request is rejected if the + tag does not match the latest or active version's current entity tag. See Optimistic + locking for asset model writes in the IoT SiteWise User Guide. +- `"If-None-Match"`: Accepts * to reject the create request if an active version (specified + using matchForVersionType as ACTIVE) already exists for the asset model. +- `"Match-For-Version-Type"`: Specifies the asset model version type (LATEST or ACTIVE) + used in conjunction with If-Match or If-None-Match headers to determine the target ETag for + the create operation. - `"assetModelCompositeModelDescription"`: A description for the composite model. - `"assetModelCompositeModelExternalId"`: An external ID to assign to the composite model. If the composite model is a derived composite model, or one nested inside a component @@ -666,12 +676,14 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys to supply your own ID instead, you can specify it here in UUID format. If you specify your own ID, it must be globally unique. - `"assetModelCompositeModelProperties"`: The property definitions of the composite model. - For more information, see <LINK>. You can specify up to 200 properties per composite - model. For more information, see Quotas in the IoT SiteWise User Guide. + For more information, see Inline custom composite models in the IoT SiteWise User Guide. + You can specify up to 200 properties per composite model. For more information, see Quotas + in the IoT SiteWise User Guide. - `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. -- `"composedAssetModelId"`: The ID of a composite model on this asset. +- `"composedAssetModelId"`: The ID of a component model which is reused to create this + composite model. - `"parentAssetModelCompositeModelId"`: The ID of the parent composite model in this asset model relationship. """ @@ -875,7 +887,7 @@ from local servers to IoT SiteWise. 
For more information, see Ingesting data using a
gateway in the IoT SiteWise User Guide.

# Arguments
-- `gateway_name`: A unique, friendly name for the gateway.
+- `gateway_name`: A unique name for the gateway.
- `gateway_platform`: The gateway's platform. You can only specify one platform in a
 gateway.
@@ -1172,6 +1184,15 @@ models in the IoT SiteWise User Guide.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"If-Match"`: The expected current entity tag (ETag) for the asset model’s latest or
+ active version (specified using matchForVersionType). The delete request is rejected if the
+ tag does not match the latest or active version's current entity tag. See Optimistic
+ locking for asset model writes in the IoT SiteWise User Guide.
+- `"If-None-Match"`: Accepts * to reject the delete request if an active version (specified
+ using matchForVersionType as ACTIVE) already exists for the asset model.
+- `"Match-For-Version-Type"`: Specifies the asset model version type (LATEST or ACTIVE)
+ used in conjunction with If-Match or If-None-Match headers to determine the target ETag for
+ the delete operation.
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
 idempotency of the request. Don't reuse this client token if a new idempotent request is
 required.
@@ -1217,6 +1238,15 @@ assets and models in the IoT SiteWise User Guide.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"If-Match"`: The expected current entity tag (ETag) for the asset model’s latest or
+ active version (specified using matchForVersionType). The delete request is rejected if the
+ tag does not match the latest or active version's current entity tag. See Optimistic
+ locking for asset model writes in the IoT SiteWise User Guide.
+- `"If-None-Match"`: Accepts * to reject the delete request if an active version (specified
+ using matchForVersionType as ACTIVE) already exists for the asset model.
+- `"Match-For-Version-Type"`: Specifies the asset model version type (LATEST or ACTIVE)
+ used in conjunction with If-Match or If-None-Match headers to determine the target ETag for
+ the delete operation.
- `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the
 idempotency of the request. Don't reuse this client token if a new idempotent request is
 required.
@@ -1608,6 +1638,9 @@ Retrieves information about an asset model.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"assetModelVersion"`: The version alias that specifies the latest or active version of
+ the asset model. The details are returned in the response. The default value is LATEST. See
+ Asset model versions in the IoT SiteWise User Guide.
- `"excludeProperties"`: Whether or not to exclude asset model properties from the
 response.
"""
@@ -1652,6 +1685,11 @@ SiteWise User Guide.
 format, or else externalId: followed by the external ID, if it has one. For more
 information, see Referencing objects with external IDs in the IoT SiteWise User
 Guide.

+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"assetModelVersion"`: The version alias that specifies the latest or active version of
+ the asset model. The details are returned in the response. The default value is LATEST. See
+ Asset model versions in the IoT SiteWise User Guide.
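+
+# Example
+A minimal, illustrative sketch using AWS.jl's high-level `@service` API; both IDs below
+are placeholders, not real resources:
+
+    using AWS
+    @service IoTSiteWise
+
+    # Request the ACTIVE version of the composite model instead of the default LATEST.
+    resp = IoTSiteWise.describe_asset_model_composite_model(
+        "a1b2c3d4-5678-90ab-cdef-11111EXAMPLE",  # composite model ID (placeholder)
+        "a1b2c3d4-5678-90ab-cdef-22222EXAMPLE",  # asset model ID (placeholder)
+        Dict("assetModelVersion" => "ACTIVE"),
+    )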
""" function describe_asset_model_composite_model( assetModelCompositeModelId, @@ -2668,6 +2706,9 @@ Retrieves a paginated list of composite models associated with the asset model # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"assetModelVersion"`: The version alias that specifies the latest or active version of + the asset model. The details are returned in the response. The default value is LATEST. See + Asset model versions in the IoT SiteWise User Guide. - `"maxResults"`: The maximum number of results to return for each paginated request. Default: 50 - `"nextToken"`: The token to be used for the next set of paginated results. @@ -2711,6 +2752,9 @@ to start all over again. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"assetModelVersion"`: The version alias that specifies the latest or active version of + the asset model. The details are returned in the response. The default value is LATEST. See + Asset model versions in the IoT SiteWise User Guide. - `"filter"`: Filters the requested list of asset model properties. You can choose one of the following options: ALL – The list includes all asset model properties for a given asset model ID. BASE – The list includes only base asset model properties for a given @@ -2751,10 +2795,14 @@ Retrieves a paginated list of summaries of all asset models. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"assetModelTypes"`: The type of asset model. ASSET_MODEL – (default) An asset model - that you can use to create assets. Can't be included as a component in another asset model. - COMPONENT_MODEL – A reusable component that you can include in the composite models of +- `"assetModelTypes"`: The type of asset model. If you don't provide an assetModelTypes, + all types of asset models are returned. ASSET_MODEL – An asset model that you can use + to create assets. Can't be included as a component in another asset model. + COMPONENT_MODEL – A reusable component that you can include in the composite models of other asset models. You can't create assets directly from this type of asset model. +- `"assetModelVersion"`: The version alias that specifies the latest or active version of + the asset model. The details are returned in the response. The default value is LATEST. See + Asset model versions in the IoT SiteWise User Guide. - `"maxResults"`: The maximum number of results to return for each paginated request. Default: 50 - `"nextToken"`: The token to be used for the next set of paginated results. @@ -2910,8 +2958,8 @@ end list_associated_assets(asset_id, params::Dict{String,<:Any}) Retrieves a paginated list of associated assets. You can use this operation to do the -following: List child assets associated to a parent asset by a hierarchy that you -specify. List an asset's parent asset. +following: CHILD - List all child assets associated to the asset. PARENT - List the +asset's parent asset. # Arguments - `asset_id`: The ID of the asset to query. This can be either the actual ID in UUID @@ -2920,20 +2968,18 @@ specify. List an asset's parent asset. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"hierarchyId"`: The ID of the hierarchy by which child assets are associated to the - asset. (This can be either the actual ID in UUID format, or else externalId: followed by - the external ID, if it has one. 
For more information, see Referencing objects with external - IDs in the IoT SiteWise User Guide.) To find a hierarchy ID, use the DescribeAsset or - DescribeAssetModel operations. This parameter is required if you choose CHILD for - traversalDirection. For more information, see Asset hierarchies in the IoT SiteWise User - Guide. +- `"hierarchyId"`: (Optional) If you don't provide a hierarchyId, all the immediate assets + in the traversalDirection will be returned. The ID of the hierarchy by which child assets + are associated to the asset. (This can be either the actual ID in UUID format, or else + externalId: followed by the external ID, if it has one. For more information, see + Referencing objects with external IDs in the IoT SiteWise User Guide.) For more + information, see Asset hierarchies in the IoT SiteWise User Guide. - `"maxResults"`: The maximum number of results to return for each paginated request. Default: 50 - `"nextToken"`: The token to be used for the next set of paginated results. - `"traversalDirection"`: The direction to list associated assets. Choose one of the following options: CHILD – The list includes all child assets associated to the asset. - The hierarchyId parameter is required if you choose CHILD. PARENT – The list includes - the asset's parent asset. Default: CHILD + PARENT – The list includes the asset's parent asset. Default: CHILD """ function list_associated_assets(assetId; aws_config::AbstractAWSConfig=global_aws_config()) return iotsitewise( @@ -3622,28 +3668,38 @@ end Updates an asset model and all of the assets that were created from the model. Each asset created from the model inherits the updated asset model's property and hierarchy definitions. For more information, see Updating assets and models in the IoT SiteWise User -Guide. This operation overwrites the existing model with the provided model. To avoid -deleting your asset model's properties or hierarchies, you must include their IDs and -definitions in the updated asset model payload. For more information, see -DescribeAssetModel. If you remove a property from an asset model, IoT SiteWise deletes all -previous data for that property. If you remove a hierarchy definition from an asset model, -IoT SiteWise disassociates every asset associated with that hierarchy. You can't change the -type or data type of an existing property. +Guide. If you remove a property from an asset model, IoT SiteWise deletes all previous +data for that property. You can’t change the type or data type of an existing property. +To replace an existing asset model property with a new one with the same name, do the +following: Submit an UpdateAssetModel request with the entire existing property removed. + Submit a second UpdateAssetModel request that includes the new property. The new asset +property will have the same name as the previous one and IoT SiteWise will generate a new +unique id. # Arguments - `asset_model_id`: The ID of the asset model to update. This can be either the actual ID in UUID format, or else externalId: followed by the external ID, if it has one. For more information, see Referencing objects with external IDs in the IoT SiteWise User Guide. -- `asset_model_name`: A unique, friendly name for the asset model. +- `asset_model_name`: A unique name for the asset model. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"If-Match"`: The expected current entity tag (ETag) for the asset model’s latest or + active version (specified using matchForVersionType). The update request is rejected if the + tag does not match the latest or active version's current entity tag. See Optimistic + locking for asset model writes in the IoT SiteWise User Guide. +- `"If-None-Match"`: Accepts * to reject the update request if an active version (specified + using matchForVersionType as ACTIVE) already exists for the asset model. +- `"Match-For-Version-Type"`: Specifies the asset model version type (LATEST or ACTIVE) + used in conjunction with If-Match or If-None-Match headers to determine the target ETag for + the update operation. - `"assetModelCompositeModels"`: The composite models that are part of this asset model. It groups properties (such as attributes, measurements, transforms, and metrics) and child composite models that model parts of your industrial equipment. Each composite model has a type that defines the properties that the composite model supports. Use composite models to define alarms on this asset model. When creating custom composite models, you need to use - CreateAssetModelCompositeModel. For more information, see <LINK>. + CreateAssetModelCompositeModel. For more information, see Creating custom composite models + (Components) in the IoT SiteWise User Guide. - `"assetModelDescription"`: A description for the asset model. - `"assetModelExternalId"`: An external ID to assign to the asset model. The asset model must not already have an external ID. The external ID must be unique within your Amazon Web @@ -3714,18 +3770,28 @@ previous one and IoT SiteWise will generate a new unique id. # Arguments - `asset_model_composite_model_id`: The ID of a composite model on this asset model. -- `asset_model_composite_model_name`: A unique, friendly name for the composite model. +- `asset_model_composite_model_name`: A unique name for the composite model. - `asset_model_id`: The ID of the asset model, in UUID format. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"If-Match"`: The expected current entity tag (ETag) for the asset model’s latest or + active version (specified using matchForVersionType). The update request is rejected if the + tag does not match the latest or active version's current entity tag. See Optimistic + locking for asset model writes in the IoT SiteWise User Guide. +- `"If-None-Match"`: Accepts * to reject the update request if an active version (specified + using matchForVersionType as ACTIVE) already exists for the asset model. +- `"Match-For-Version-Type"`: Specifies the asset model version type (LATEST or ACTIVE) + used in conjunction with If-Match or If-None-Match headers to determine the target ETag for + the update operation. - `"assetModelCompositeModelDescription"`: A description for the composite model. - `"assetModelCompositeModelExternalId"`: An external ID to assign to the asset model. You can only set the external ID of the asset model if it wasn't set when it was created, or you're setting it to the exact same thing as when it was created. - `"assetModelCompositeModelProperties"`: The property definitions of the composite model. - For more information, see <LINK>. You can specify up to 200 properties per composite - model. For more information, see Quotas in the IoT SiteWise User Guide. + For more information, see Inline custom composite models in the IoT SiteWise User Guide. 
+ You can specify up to 200 properties per composite model. For more information, see Quotas + in the IoT SiteWise User Guide. - `"clientToken"`: A unique case-sensitive identifier that you can provide to ensure the idempotency of the request. Don't reuse this client token if a new idempotent request is required. @@ -3906,7 +3972,7 @@ Updates a gateway's name. # Arguments - `gateway_id`: The ID of the gateway to update. -- `gateway_name`: A unique, friendly name for the gateway. +- `gateway_name`: A unique name for the gateway. """ function update_gateway( diff --git a/src/services/ivs_realtime.jl b/src/services/ivs_realtime.jl index 8785c3e1d2..c298347d4e 100644 --- a/src/services/ivs_realtime.jl +++ b/src/services/ivs_realtime.jl @@ -100,8 +100,8 @@ Creates a new stage (and optionally participant tokens). # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"autoParticipantRecordingConfiguration"`: Auto participant recording configuration - object attached to the stage. +- `"autoParticipantRecordingConfiguration"`: Configuration object for individual + participant recording, to attach to the new stage. - `"name"`: Optional name that can be specified for the stage being created. - `"participantTokenConfigurations"`: Array of participant token configuration objects to attach to the new stage. @@ -203,6 +203,38 @@ function delete_encoder_configuration( ) end +""" + delete_public_key(arn) + delete_public_key(arn, params::Dict{String,<:Any}) + +Deletes the specified public key used to sign stage participant tokens. This invalidates +future participant tokens generated using the key pair’s private key. + +# Arguments +- `arn`: ARN of the public key to be deleted. + +""" +function delete_public_key(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/DeletePublicKey", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_public_key( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/DeletePublicKey", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_stage(arn) delete_stage(arn, params::Dict{String,<:Any}) @@ -434,6 +466,37 @@ function get_participant( ) end +""" + get_public_key(arn) + get_public_key(arn, params::Dict{String,<:Any}) + +Gets information for the specified public key. + +# Arguments +- `arn`: ARN of the public key for which the information is to be retrieved. + +""" +function get_public_key(arn; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", + "/GetPublicKey", + Dict{String,Any}("arn" => arn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_public_key( + arn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/GetPublicKey", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("arn" => arn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_stage(arn) get_stage(arn, params::Dict{String,<:Any}) @@ -539,6 +602,52 @@ function get_storage_configuration( ) end +""" + import_public_key(public_key_material) + import_public_key(public_key_material, params::Dict{String,<:Any}) + +Import a public key to be used for signing stage participant tokens. 
+ +# Arguments +- `public_key_material`: The content of the public key to be imported. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"name"`: Name of the public key to be imported. +- `"tags"`: Tags attached to the resource. Array of maps, each of the form string:string + (key:value). See Tagging AWS Resources for details, including restrictions that apply to + tags and \"Tag naming limits and requirements\"; Amazon IVS has no constraints on tags + beyond what is documented there. +""" +function import_public_key( + publicKeyMaterial; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/ImportPublicKey", + Dict{String,Any}("publicKeyMaterial" => publicKeyMaterial); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_public_key( + publicKeyMaterial, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ivs_realtime( + "POST", + "/ImportPublicKey", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("publicKeyMaterial" => publicKeyMaterial), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_compositions() list_compositions(params::Dict{String,<:Any}) @@ -725,6 +834,36 @@ function list_participants( ) end +""" + list_public_keys() + list_public_keys(params::Dict{String,<:Any}) + +Gets summary information about all public keys in your account, in the AWS region where the +API request is processed. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: Maximum number of results to return. Default: 50. +- `"nextToken"`: The first public key to retrieve. This is used for pagination; see the + nextToken response field. +""" +function list_public_keys(; aws_config::AbstractAWSConfig=global_aws_config()) + return ivs_realtime( + "POST", "/ListPublicKeys"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_public_keys( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ivs_realtime( + "POST", + "/ListPublicKeys", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_stage_sessions(stage_arn) list_stage_sessions(stage_arn, params::Dict{String,<:Any}) @@ -1049,9 +1188,9 @@ Updates a stage’s configuration. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"autoParticipantRecordingConfiguration"`: Auto-participant-recording configuration - object to attach to the stage. Auto-participant-recording configuration cannot be updated - while recording is active. +- `"autoParticipantRecordingConfiguration"`: Configuration object for individual + participant recording, to attach to the stage. Note that this cannot be updated while + recording is active. - `"name"`: Name of the stage to be updated. 
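+
+# Example
+A minimal, illustrative sketch of renaming a stage with AWS.jl's high-level `@service`
+API; the ARN is a placeholder, not a real resource:
+
+    using AWS
+    @service IVS_Realtime
+
+    # Only the stage name is changed here; the individual participant recording
+    # configuration cannot be updated while recording is active.
+    resp = IVS_Realtime.update_stage(
+        "arn:aws:ivs:us-west-2:123456789012:stage/abcdABCDefgh",  # placeholder ARN
+        Dict("name" => "my-renamed-stage"),
+    )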
""" function update_stage(arn; aws_config::AbstractAWSConfig=global_aws_config()) diff --git a/src/services/kinesis_analytics_v2.jl b/src/services/kinesis_analytics_v2.jl index d8f45ba1f9..832d11d6c1 100644 --- a/src/services/kinesis_analytics_v2.jl +++ b/src/services/kinesis_analytics_v2.jl @@ -1012,6 +1012,52 @@ function describe_application( ) end +""" + describe_application_operation(application_name, operation_id) + describe_application_operation(application_name, operation_id, params::Dict{String,<:Any}) + +Returns information about a specific operation performed on a Managed Service for Apache +Flink application + +# Arguments +- `application_name`: +- `operation_id`: + +""" +function describe_application_operation( + ApplicationName, OperationId; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_analytics_v2( + "DescribeApplicationOperation", + Dict{String,Any}( + "ApplicationName" => ApplicationName, "OperationId" => OperationId + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_application_operation( + ApplicationName, + OperationId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_analytics_v2( + "DescribeApplicationOperation", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "ApplicationName" => ApplicationName, "OperationId" => OperationId + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_application_snapshot(application_name, snapshot_name) describe_application_snapshot(application_name, snapshot_name, params::Dict{String,<:Any}) @@ -1164,6 +1210,50 @@ function discover_input_schema( ) end +""" + list_application_operations(application_name) + list_application_operations(application_name, params::Dict{String,<:Any}) + +Lists information about operations performed on a Managed Service for Apache Flink +application + +# Arguments +- `application_name`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Limit"`: +- `"NextToken"`: +- `"Operation"`: +- `"OperationStatus"`: +""" +function list_application_operations( + ApplicationName; aws_config::AbstractAWSConfig=global_aws_config() +) + return kinesis_analytics_v2( + "ListApplicationOperations", + Dict{String,Any}("ApplicationName" => ApplicationName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_application_operations( + ApplicationName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return kinesis_analytics_v2( + "ListApplicationOperations", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("ApplicationName" => ApplicationName), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_application_snapshots(application_name) list_application_snapshots(application_name, params::Dict{String,<:Any}) @@ -1324,11 +1414,10 @@ end rollback_application(application_name, current_application_version_id, params::Dict{String,<:Any}) Reverts the application to the previous running version. You can roll back an application -if you suspect it is stuck in a transient status. You can roll back an application only if -it is in the UPDATING or AUTOSCALING status. When you rollback an application, it loads -state data from the last successful snapshot. If the application has no snapshots, Managed -Service for Apache Flink rejects the rollback request. 
This action is not supported for -Managed Service for Apache Flink for SQL applications. +if you suspect it is stuck in a transient status or in the running status. You can roll +back an application only if it is in the UPDATING, AUTOSCALING, or RUNNING statuses. When +you rollback an application, it loads state data from the last successful snapshot. If the +application has no snapshots, Managed Service for Apache Flink rejects the rollback request. # Arguments - `application_name`: The name of the application. diff --git a/src/services/kinesis_video_webrtc_storage.jl b/src/services/kinesis_video_webrtc_storage.jl index c9ab0c4b37..e97eae6392 100644 --- a/src/services/kinesis_video_webrtc_storage.jl +++ b/src/services/kinesis_video_webrtc_storage.jl @@ -8,19 +8,27 @@ using AWS.UUIDs join_storage_session(channel_arn) join_storage_session(channel_arn, params::Dict{String,<:Any}) - Join the ongoing one way-video and/or multi-way audio WebRTC session as a video producing -device for an input channel. If there’s no existing session for the channel, a new -streaming session needs to be created, and the Amazon Resource Name (ARN) of the signaling -channel must be provided. Currently for the SINGLE_MASTER type, a video producing device -is able to ingest both audio and video media into a stream, while viewers can only ingest -audio. Both a video producing device and viewers can join the session first, and wait for -other participants. While participants are having peer to peer conversations through -webRTC, the ingested media session will be stored into the Kinesis Video Stream. Multiple -viewers are able to playback real-time media. Customers can also use existing Kinesis Video -Streams features like HLS or DASH playback, Image generation, and more with ingested WebRTC -media. Assume that only one video producing device client can be associated with a session -for the channel. If more than one client joins the session of a specific channel as a video -producing device, the most recent client request takes precedence. + Before using this API, you must call the GetSignalingChannelEndpoint API to request the +WEBRTC endpoint. You then specify the endpoint and region in your JoinStorageSession API +request. Join the ongoing one way-video and/or multi-way audio WebRTC session as a video +producing device for an input channel. If there’s no existing session for the channel, a +new streaming session needs to be created, and the Amazon Resource Name (ARN) of the +signaling channel must be provided. Currently for the SINGLE_MASTER type, a video +producing device is able to ingest both audio and video media into a stream. Only video +producing devices can join the session and record media. Both audio and video tracks are +currently required for WebRTC ingestion. Current requirements: Video track: H.264 Audio +track: Opus The resulting ingested video in the Kinesis video stream will have the +following parameters: H.264 video and AAC audio. Once a master participant has negotiated a +connection through WebRTC, the ingested media session will be stored in the Kinesis video +stream. Multiple viewers are then able to play back real-time media through our Playback +APIs. You can also use existing Kinesis Video Streams features like HLS or DASH playback, +image generation via GetImages, and more with ingested WebRTC media. S3 image delivery and +notifications are not currently supported. Assume that only one video producing device +client can be associated with a session for the channel. 
If more than one client joins the
+session of a specific channel as a video producing device, the most recent client request
+takes precedence. Additional information  Idempotent - This API is not idempotent.
+Retry behavior - This is counted as a new API call. Concurrent calls - Concurrent calls
+are allowed. An offer is sent once per each call.

# Arguments
- `channel_arn`: The Amazon Resource Name (ARN) of the signaling channel.
@@ -50,3 +58,57 @@ function join_storage_session(
        feature_set=SERVICE_FEATURE_SET,
    )
end
+
+"""
+    join_storage_session_as_viewer(channel_arn, client_id)
+    join_storage_session_as_viewer(channel_arn, client_id, params::Dict{String,<:Any})
+
+ Join the ongoing one way-video and/or multi-way audio WebRTC session as a viewer for an
+input channel. If there’s no existing session for the channel, create a new streaming
+session and provide the Amazon Resource Name (ARN) of the signaling channel (channelArn)
+and client id (clientId). Currently for SINGLE_MASTER type, a video producing device is
+able to ingest both audio and video media into a stream, while viewers can only ingest
+audio. Both a video producing device and viewers can join a session first and wait for
+other participants. While participants are having peer to peer conversations through
+WebRTC, the ingested media session will be stored into the Kinesis Video Stream. Multiple
+viewers are able to playback real-time media. Customers can also use existing Kinesis
+Video Streams features like HLS or DASH playback, Image generation, and more with ingested
+WebRTC media. If there’s an existing session with the same clientId that's found in the
+join session request, the new request takes precedence.
+
+# Arguments
+- `channel_arn`: The Amazon Resource Name (ARN) of the signaling channel.
+- `client_id`: The unique identifier for the sender client.
+
+"""
+function join_storage_session_as_viewer(
+    channelArn, clientId; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return kinesis_video_webrtc_storage(
+        "POST",
+        "/joinStorageSessionAsViewer",
+        Dict{String,Any}("channelArn" => channelArn, "clientId" => clientId);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function join_storage_session_as_viewer(
+    channelArn,
+    clientId,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return kinesis_video_webrtc_storage(
+        "POST",
+        "/joinStorageSessionAsViewer",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}("channelArn" => channelArn, "clientId" => clientId),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
diff --git a/src/services/lambda.jl b/src/services/lambda.jl
index 2846e91a18..3c3925637d 100644
--- a/src/services/lambda.jl
+++ b/src/services/lambda.jl
@@ -83,20 +83,21 @@ end
    add_permission(action, function_name, principal, statement_id)
    add_permission(action, function_name, principal, statement_id, params::Dict{String,<:Any})

-Grants an Amazon Web Service, Amazon Web Services account, or Amazon Web Services
+Grants an Amazon Web Services service, Amazon Web Services account, or Amazon Web Services
organization permission to use a function. You can apply the policy at the function level,
or specify a qualifier to restrict access to a single version or alias. If you use a
qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or
alias to invoke the function. Note: Lambda does not support adding policies to version
LATEST.
To grant permission to another account, specify the account ID as the Principal. To
grant permission to an organization defined in Organizations, specify the organization ID
-as the PrincipalOrgID. For Amazon Web Services, the principal is a domain-style identifier
-that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web
-Services, you can also specify the ARN of the associated resource as the SourceArn. If you
-grant permission to a service principal without specifying the source, other accounts could
-potentially configure resources in their account to invoke your Lambda function. This
-operation adds a statement to a resource-based permissions policy for the function. For
-more information about function policies, see Using resource-based policies for Lambda.
+as the PrincipalOrgID. For Amazon Web Services services, the principal is a domain-style
+identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For
+Amazon Web Services services, you can also specify the ARN of the associated resource as the
+SourceArn. If you grant permission to a service principal without specifying the source,
+other accounts could potentially configure resources in their account to invoke your Lambda
+function. This operation adds a statement to a resource-based permissions policy for the
+function. For more information about function policies, see Using resource-based policies
+for Lambda.

# Arguments
- `action`: The action that the principal can use on the function. For example,
@@ -107,9 +108,9 @@ more information about function policies, see Using resource-based policies for
 123456789012:function:my-function. You can append a version number or alias to any of the
 formats. The length constraint applies only to the full ARN. If you specify only the
 function name, it is limited to 64 characters in length.
-- `principal`: The Amazon Web Service or Amazon Web Services account that invokes the
-  function. If you specify a service, use SourceArn or SourceAccount to limit who can invoke
-  the function through that service.
+- `principal`: The Amazon Web Services service or Amazon Web Services account that invokes
+  the function. If you specify a service, use SourceArn or SourceAccount to limit who can
+  invoke the function through that service.
- `statement_id`: A statement identifier that differentiates the statement from others in
 the same policy.

@@ -128,13 +129,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"RevisionId"`: Update the policy only if the revision ID matches the ID that's
 specified. Use this option to avoid modifying a policy that has changed since you last read
 it.
-- `"SourceAccount"`: For Amazon Web Service, the ID of the Amazon Web Services account that
-  owns the resource. Use this together with SourceArn to ensure that the specified account
-  owns the resource. It is possible for an Amazon S3 bucket to be deleted by its owner and
-  recreated by another account.
+- `"SourceAccount"`: For Amazon Web Services service, the ID of the Amazon Web Services
+  account that owns the resource. Use this together with SourceArn to ensure that the
+  specified account owns the resource.
It is possible for an Amazon S3 bucket to be deleted
+  by its owner and recreated by another account.
+- `"SourceArn"`: For Amazon Web Services services, the ARN of the Amazon Web Services
+  resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.
+  Note that Lambda configures the comparison using the StringLike operator.
"""
function add_permission(
    Action,
@@ -343,17 +344,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 Lambda should process an event. For more information, see Lambda event filtering.
- `"FunctionResponseTypes"`: (Kinesis, DynamoDB Streams, and Amazon SQS) A list of current
 response type enums applied to the event source mapping.
+- `"KMSKeyArn"`: The ARN of the Key Management Service (KMS) customer managed key that
+ Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt
+ your filter criteria object. Specify this property to encrypt data using your own customer
+ managed key.
- `"MaximumBatchingWindowInSeconds"`: The maximum amount of time, in seconds, that Lambda
 spends gathering records before invoking the function. You can configure
 MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of
- seconds. For streams and Amazon SQS event sources, the default batching window is 0
- seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event
+ seconds. For Kinesis, DynamoDB, and Amazon SQS event sources, the default batching window
+ is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event
 sources, the default batching window is 500 ms. Note that because you can only change
 MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert back to the 500
 ms default batching window after you have changed it. To restore the default batching
- window, you must create a new event source mapping. Related setting: For streams and Amazon
- SQS event sources, when you set BatchSize to a value greater than 10, you must set
- MaximumBatchingWindowInSeconds to at least 1.
+ window, you must create a new event source mapping. Related setting: For Kinesis, DynamoDB,
+ and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must
+ set MaximumBatchingWindowInSeconds to at least 1.
- `"MaximumRecordAgeInSeconds"`: (Kinesis and DynamoDB Streams only) Discard records older
 than the specified age. The default value is infinite (-1).
- `"MaximumRetryAttempts"`: (Kinesis and DynamoDB Streams only) Discard records after the
@@ -413,41 +418,41 @@ end

Creates a Lambda function. To create a function, you need a deployment package and an
execution role. The deployment package is a .zip file archive or container image that
contains your function code. The execution role grants the function permission to use
-Amazon Web Services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request
-tracing. If the deployment package is a container image, then you set the package type to
-Image. For a container image, the code property must include the URI of a container image
-in the Amazon ECR registry. You do not need to specify the handler and runtime properties.
-If the deployment package is a .zip file archive, then you set the package type to Zip. For
-a .zip file archive, the code property specifies the location of the .zip file. You must
-also specify the handler and runtime properties.
@@ -343,17 +344,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
  Lambda should process an event. For more information, see Lambda event filtering.
- `"FunctionResponseTypes"`: (Kinesis, DynamoDB Streams, and Amazon SQS) A list of current
  response type enums applied to the event source mapping.
+- `"KMSKeyArn"`: The ARN of the Key Management Service (KMS) customer managed key that
+  Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt
+  your filter criteria object. Specify this property to encrypt data using your own customer
+  managed key.
- `"MaximumBatchingWindowInSeconds"`: The maximum amount of time, in seconds, that Lambda
  spends gathering records before invoking the function. You can configure
  MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of
-  seconds. For streams and Amazon SQS event sources, the default batching window is 0
-  seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event
+  seconds. For Kinesis, DynamoDB, and Amazon SQS event sources, the default batching window
+  is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event
  sources, the default batching window is 500 ms. Note that because you can only change
  MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert to the 500
  ms default batching window after you have changed it. To restore the default batching
-  window, you must create a new event source mapping. Related setting: For streams and Amazon
-  SQS event sources, when you set BatchSize to a value greater than 10, you must set
-  MaximumBatchingWindowInSeconds to at least 1.
+  window, you must create a new event source mapping. Related setting: For Kinesis, DynamoDB,
+  and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must
+  set MaximumBatchingWindowInSeconds to at least 1.
- `"MaximumRecordAgeInSeconds"`: (Kinesis and DynamoDB Streams only) Discard records older
  than the specified age. The default value is infinite (-1).
- `"MaximumRetryAttempts"`: (Kinesis and DynamoDB Streams only) Discard records after the
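The new KMSKeyArn parameter documented in the hunk above can be exercised as follows. This
is a sketch under assumed values: the queue ARN, key ID, and filter pattern are all
hypothetical, not part of the generated code.

    using AWS: @service
    @service Lambda

    # Map an SQS queue to a function, filter events, and encrypt the filter
    # criteria object with a customer managed KMS key via KMSKeyArn.
    Lambda.create_event_source_mapping(
        "my-function",
        Dict(
            "EventSourceArn" => "arn:aws:sqs:us-east-1:123456789012:my-queue",
            "FilterCriteria" => Dict(
                "Filters" => [Dict("Pattern" => "{\"body\":{\"status\":[\"active\"]}}")],
            ),
            "KMSKeyArn" => "arn:aws:kms:us-east-1:123456789012:key/EXAMPLE-KEY-ID",
        ),
    )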
@@ -413,41 +418,41 @@ end

Creates a Lambda function. To create a function, you need a deployment package and an
execution role. The deployment package is a .zip file archive or container image that
contains your function code. The execution role grants the function permission to use
-Amazon Web Services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request
-tracing. If the deployment package is a container image, then you set the package type to
-Image. For a container image, the code property must include the URI of a container image
-in the Amazon ECR registry. You do not need to specify the handler and runtime properties.
-If the deployment package is a .zip file archive, then you set the package type to Zip. For
-a .zip file archive, the code property specifies the location of the .zip file. You must
-also specify the handler and runtime properties. The code in the deployment package must be
-compatible with the target instruction set architecture of the function (x86-64 or arm64).
-If you do not specify the architecture, then the default value is x86-64. When you create a
-function, Lambda provisions an instance of the function and its supporting resources. If
-your function connects to a VPC, this process can take a minute or so. During this time,
-you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields
-in the response from GetFunctionConfiguration indicate when the function is ready to
-invoke. For more information, see Lambda function states. A function has an unpublished
-version, and can have published versions and aliases. The unpublished version changes when
-you update your function's code and configuration. A published version is a snapshot of
-your function code and configuration that can't be changed. An alias is a named resource
-that maps to a version, and can be changed to map to a different version. Use the Publish
-parameter to create version 1 of your function from its initial configuration. The other
-parameters let you configure version-specific and function-level settings. You can modify
-version-specific settings later with UpdateFunctionConfiguration. Function-level settings
-apply to both the unpublished and published versions of the function, and include tags
-(TagResource) and per-function concurrency limits (PutFunctionConcurrency). You can use
-code signing if your deployment package is a .zip file archive. To enable code signing for
-this function, specify the ARN of a code-signing configuration. When a user attempts to
-deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a
-valid signature from a trusted publisher. The code-signing configuration includes set of
-signing profiles, which define the trusted publishers for this function. If another Amazon
-Web Services account or an Amazon Web Service invokes your function, use AddPermission to
-grant permission by creating a resource-based Identity and Access Management (IAM) policy.
-You can grant permissions at the function level, on a version, or on an alias. To invoke
-your function directly, use Invoke. To invoke your function in response to events in other
-Amazon Web Services, create an event source mapping (CreateEventSourceMapping), or
-configure a function trigger in the other service. For more information, see Invoking
-Lambda functions.
+Amazon Web Services services, such as Amazon CloudWatch Logs for log streaming and X-Ray
+for request tracing. If the deployment package is a container image, then you set the
+package type to Image. For a container image, the code property must include the URI of a
+container image in the Amazon ECR registry. You do not need to specify the handler and
+runtime properties. If the deployment package is a .zip file archive, then you set the
+package type to Zip. For a .zip file archive, the code property specifies the location of
+the .zip file. You must also specify the handler and runtime properties. The code in the
+deployment package must be compatible with the target instruction set architecture of the
+function (x86-64 or arm64). If you do not specify the architecture, then the default value
+is x86-64. When you create a function, Lambda provisions an instance of the function and
+its supporting resources. If your function connects to a VPC, this process can take a
+minute or so. During this time, you can't invoke or modify the function. The State,
+StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration
+indicate when the function is ready to invoke. For more information, see Lambda function
+states. A function has an unpublished version, and can have published versions and
+aliases. The unpublished version changes when you update your function's code and
+configuration. A published version is a snapshot of your function code and configuration
+that can't be changed. An alias is a named resource that maps to a version, and can be
+changed to map to a different version. Use the Publish parameter to create version 1 of
+your function from its initial configuration. The other parameters let you configure
+version-specific and function-level settings. You can modify version-specific settings
+later with UpdateFunctionConfiguration. Function-level settings apply to both the
+unpublished and published versions of the function, and include tags (TagResource) and
+per-function concurrency limits (PutFunctionConcurrency). You can use code signing if your
+deployment package is a .zip file archive. To enable code signing for this function,
+specify the ARN of a code-signing configuration. When a user attempts to deploy a code
+package with UpdateFunctionCode, Lambda checks that the code package has a valid signature
+from a trusted publisher. The code-signing configuration includes a set of signing
+profiles, which define the trusted publishers for this function. If another Amazon Web
+Services account or an Amazon Web Services service invokes your function, use AddPermission
+to grant permission by creating a resource-based Identity and Access Management (IAM)
+policy. You can grant permissions at the function level, on a version, or on an alias. To
+invoke your function directly, use Invoke. To invoke your function in response to events in
+other Amazon Web Services services, create an event source mapping
+(CreateEventSourceMapping), or configure a function trigger in the other service. For more
+information, see Invoking Lambda functions.

# Arguments
- `code`: The code for the function.
@@ -497,9 +502,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"PackageType"`: The type of deployment package. Set to Image for container image and set
  to Zip for .zip file archive.
- `"Publish"`: Set to true to publish the first version of the function during creation.
-- `"Runtime"`: The identifier of the function's runtime. Runtime is required if the
-  deployment package is a .zip file archive. The following list includes deprecated runtimes.
-  For more information, see Runtime deprecation policy.
+- `"Runtime"`: The identifier of the function's runtime. Runtime is required if the
+  deployment package is a .zip file archive. Specifying a runtime results in an error if
+  you're deploying a function using a container image. The following list includes deprecated
+  runtimes. Lambda blocks creating new functions and updating existing functions shortly
+  after each runtime is deprecated. For more information, see Runtime use after deprecation.
+  For a list of all currently supported runtimes, see Supported runtimes.
- `"SnapStart"`: The function's SnapStart setting.
- `"Tags"`: A list of tags to apply to the function.
- `"Timeout"`: The amount of time (in seconds) that Lambda allows a function to run before
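To make the Runtime requirement in the hunk above concrete, here is a hedged sketch of a
.zip-based deployment; the function name, role ARN, and file path are hypothetical. A
container-image deployment would instead set PackageType to Image and omit Runtime and
Handler, per the corrected note above.

    using AWS: @service
    @service Lambda

    # A .zip deployment package must name both a runtime and a handler.
    Lambda.create_function(
        Dict("ZipFile" => read("function.zip")),            # code
        "my-function",                                      # function name
        "arn:aws:iam::123456789012:role/my-lambda-role",    # execution role
        Dict("Runtime" => "python3.12", "Handler" => "lambda_function.handler"),
    )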
@@ -719,9 +727,9 @@ end

Deletes a Lambda function. To delete a specific function version, use the Qualifier
parameter. Otherwise, all versions and aliases are deleted. This doesn't require the user
to have explicit permissions for DeleteAlias. To delete Lambda event source mappings that
-invoke a function, use DeleteEventSourceMapping. For Amazon Web Services and resources that
-invoke your function directly, delete the trigger in the service where you originally
-configured it.
+invoke a function, use DeleteEventSourceMapping. For Amazon Web Services services and
+resources that invoke your function directly, delete the trigger in the service where you
+originally configured it.

# Arguments
- `function_name`: The name or ARN of the Lambda function or version. Name formats
@@ -1340,6 +1348,40 @@ function get_function_event_invoke_config(
    )
end

+"""
+    get_function_recursion_config(function_name)
+    get_function_recursion_config(function_name, params::Dict{String,<:Any})
+
+Returns your function's recursive loop detection configuration.
+
+# Arguments
+- `function_name`:
+
+"""
+function get_function_recursion_config(
+    FunctionName; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return lambda(
+        "GET",
+        "/2024-08-31/functions/$(FunctionName)/recursion-config";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_function_recursion_config(
+    FunctionName,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return lambda(
+        "GET",
+        "/2024-08-31/functions/$(FunctionName)/recursion-config",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
"""
    get_function_url_config(function_name)
    get_function_url_config(function_name, params::Dict{String,<:Any})
@@ -2114,8 +2156,9 @@ are compatible with that architecture.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"CompatibleArchitecture"`: The compatible instruction set architecture.
-- `"CompatibleRuntime"`: A runtime identifier. For example, java21. The following list
-  includes deprecated runtimes. For more information, see Runtime deprecation policy.
+- `"CompatibleRuntime"`: A runtime identifier. The following list includes deprecated
+  runtimes. For more information, see Runtime use after deprecation. For a list of all
+  currently supported runtimes, see Supported runtimes.
- `"Marker"`: A pagination token returned by a previous call.
- `"MaxItems"`: The maximum number of versions to return.
"""
@@ -2153,8 +2196,9 @@ that instruction set architecture.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"CompatibleArchitecture"`: The compatible instruction set architecture.
-- `"CompatibleRuntime"`: A runtime identifier. For example, java21. The following list
-  includes deprecated runtimes. For more information, see Runtime deprecation policy.
+- `"CompatibleRuntime"`: A runtime identifier. The following list includes deprecated
+  runtimes. For more information, see Runtime use after deprecation. For a list of all
+  currently supported runtimes, see Supported runtimes.
- `"Marker"`: A pagination token returned by a previous call.
- `"MaxItems"`: The maximum number of layers to return.
"""
@@ -2566,6 +2610,69 @@ function put_function_event_invoke_config(
    )
end

+"""
+    put_function_recursion_config(function_name, recursive_loop)
+    put_function_recursion_config(function_name, recursive_loop, params::Dict{String,<:Any})
+
+Sets your function's recursive loop detection configuration. When you configure a Lambda
+function to output to the same service or resource that invokes the function, it's possible
+to create an infinite recursive loop. For example, a Lambda function might write a message
+to an Amazon Simple Queue Service (Amazon SQS) queue, which then invokes the same function.
+This invocation causes the function to write another message to the queue, which in turn
+invokes the function again. Lambda can detect certain types of recursive loops shortly
+after they occur. When Lambda detects a recursive loop and your function's recursive loop
+detection configuration is set to Terminate, it stops your function from being invoked and
+notifies you.
+
+# Arguments
+- `function_name`: The name or ARN of the Lambda function. Name formats  Function name
+  – my-function.  Function ARN –
+  arn:aws:lambda:us-west-2:123456789012:function:my-function.  Partial ARN –
+  123456789012:function:my-function.  The length constraint applies only to the full ARN. If
+  you specify only the function name, it is limited to 64 characters in length.
+- `recursive_loop`: If you set your function's recursive loop detection configuration to
+  Allow, Lambda doesn't take any action when it detects your function being invoked as part
+  of a recursive loop. We recommend that you only use this setting if your design
+  intentionally uses a Lambda function to write data back to the same Amazon Web Services
+  resource that invokes it. If you set your function's recursive loop detection configuration
+  to Terminate, Lambda stops your function from being invoked and notifies you when it
+  detects your function being invoked as part of a recursive loop. By default, Lambda sets
+  your function's configuration to Terminate. If your design intentionally uses a Lambda
+  function to write data back to the same Amazon Web Services resource that invokes the
+  function, then use caution and implement suitable guard rails to prevent unexpected charges
+  being billed to your Amazon Web Services account. To learn more about best practices for
+  using recursive invocation patterns, see Recursive patterns that cause run-away Lambda
+  functions in Serverless Land.
+
+"""
+function put_function_recursion_config(
+    FunctionName, RecursiveLoop; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return lambda(
+        "PUT",
+        "/2024-08-31/functions/$(FunctionName)/recursion-config",
+        Dict{String,Any}("RecursiveLoop" => RecursiveLoop);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function put_function_recursion_config(
+    FunctionName,
+    RecursiveLoop,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return lambda(
+        "PUT",
+        "/2024-08-31/functions/$(FunctionName)/recursion-config",
+        Dict{String,Any}(
+            mergewith(_merge, Dict{String,Any}("RecursiveLoop" => RecursiveLoop), params)
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
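Since put_function_recursion_config and its get_function_recursion_config counterpart are
both new in this patch, a short usage sketch may help; the function name is hypothetical,
and the two string values come from the docstring above.

    using AWS: @service
    @service Lambda

    # Keep the default Terminate behavior explicit; pass "Allow" only when a
    # design intentionally writes back to the resource that invokes it.
    Lambda.put_function_recursion_config("my-function", "Terminate")

    # Read the current recursive loop detection setting back.
    Lambda.get_function_recursion_config("my-function")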
"""
    put_provisioned_concurrency_config(function_name, provisioned_concurrent_executions, qualifier)
    put_provisioned_concurrency_config(function_name, provisioned_concurrent_executions, qualifier, params::Dict{String,<:Any})
@@ -2737,8 +2844,8 @@ end
    remove_permission(function_name, statement_id)
    remove_permission(function_name, statement_id, params::Dict{String,<:Any})

-Revokes function-use permission from an Amazon Web Service or another Amazon Web Services
-account. You can get the ID of the statement from the output of GetPolicy.
+Revokes function-use permission from an Amazon Web Services service or another Amazon Web
+Services account. You can get the ID of the statement from the output of GetPolicy.

# Arguments
- `function_name`: The name or ARN of the Lambda function, version, or alias. Name formats
@@ -2992,17 +3099,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
  you specify only the function name, it's limited to 64 characters in length.
- `"FunctionResponseTypes"`: (Kinesis, DynamoDB Streams, and Amazon SQS) A list of current
  response type enums applied to the event source mapping.
+- `"KMSKeyArn"`: The ARN of the Key Management Service (KMS) customer managed key that
+  Lambda uses to encrypt your function's filter criteria. By default, Lambda does not encrypt
+  your filter criteria object. Specify this property to encrypt data using your own customer
+  managed key.
- `"MaximumBatchingWindowInSeconds"`: The maximum amount of time, in seconds, that Lambda
  spends gathering records before invoking the function. You can configure
  MaximumBatchingWindowInSeconds to any value from 0 seconds to 300 seconds in increments of
-  seconds. For streams and Amazon SQS event sources, the default batching window is 0
-  seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event
+  seconds. For Kinesis, DynamoDB, and Amazon SQS event sources, the default batching window
+  is 0 seconds. For Amazon MSK, Self-managed Apache Kafka, Amazon MQ, and DocumentDB event
  sources, the default batching window is 500 ms. Note that because you can only change
  MaximumBatchingWindowInSeconds in increments of seconds, you cannot revert to the 500
  ms default batching window after you have changed it. To restore the default batching
-  window, you must create a new event source mapping. Related setting: For streams and Amazon
-  SQS event sources, when you set BatchSize to a value greater than 10, you must set
-  MaximumBatchingWindowInSeconds to at least 1.
+  window, you must create a new event source mapping. Related setting: For Kinesis, DynamoDB,
+  and Amazon SQS event sources, when you set BatchSize to a value greater than 10, you must
+  set MaximumBatchingWindowInSeconds to at least 1.
- `"MaximumRecordAgeInSeconds"`: (Kinesis and DynamoDB Streams only) Discard records older
  than the specified age. The default value is infinite (-1).
- `"MaximumRetryAttempts"`: (Kinesis and DynamoDB Streams only) Discard records after the
@@ -3127,8 +3238,8 @@ processing events with the new configuration. For more information, see Lambda f
states. These settings can vary between versions of a function and are locked when you
publish a version. You can't modify the configuration of a published version, only the
unpublished version. To configure function concurrency, use PutFunctionConcurrency. To
-grant invoke permissions to an Amazon Web Services account or Amazon Web Service, use
-AddPermission.
+grant invoke permissions to an Amazon Web Services account or Amazon Web Services service,
+use AddPermission.

# Arguments
- `function_name`: The name or ARN of the Lambda function. Name formats  Function name
@@ -3172,9 +3283,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
  specified. Use this option to avoid modifying a function that has changed since you last
  read it.
- `"Role"`: The Amazon Resource Name (ARN) of the function's execution role.
-- `"Runtime"`: The identifier of the function's runtime.
Runtime is required if the - deployment package is a .zip file archive. The following list includes deprecated runtimes. - For more information, see Runtime deprecation policy. +- `"Runtime"`: The identifier of the function's runtime. Runtime is required if the + deployment package is a .zip file archive. Specifying a runtime results in an error if + you're deploying a function using a container image. The following list includes deprecated + runtimes. Lambda blocks creating new functions and updating existing functions shortly + after each runtime is deprecated. For more information, see Runtime use after deprecation. + For a list of all currently supported runtimes, see Supported runtimes. - `"SnapStart"`: The function's SnapStart setting. - `"Timeout"`: The amount of time (in seconds) that Lambda allows a function to run before stopping it. The default is 3 seconds. The maximum allowed value is 900 seconds. For more diff --git a/src/services/lex_models_v2.jl b/src/services/lex_models_v2.jl index 77bcf483cc..a57e3d838d 100644 --- a/src/services/lex_models_v2.jl +++ b/src/services/lex_models_v2.jl @@ -745,7 +745,8 @@ end Adds a new resource policy statement to a bot or bot alias. If a resource policy exists, the statement is added to the current resource policy. If a policy doesn't exist, a new policy is created. You can't create a resource policy statement that allows cross-account -access. +access. You need to add the CreateResourcePolicy or UpdateResourcePolicy action to the bot +role in order to call the API. # Arguments - `action`: The Amazon Lex action that this policy either allows or denies. The action must @@ -1434,7 +1435,8 @@ end Deletes a policy statement from a resource policy. If you delete the last statement from a policy, the policy is deleted. If you specify a statement ID that doesn't exist in the policy, or if the bot or bot alias doesn't have a policy attached, Amazon Lex returns an -exception. +exception. You need to add the DeleteResourcePolicy or UpdateResourcePolicy action to the +bot role in order to call the API. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the bot or bot alias that the resource diff --git a/src/services/license_manager_linux_subscriptions.jl b/src/services/license_manager_linux_subscriptions.jl index f9c028f39e..7505e96c4f 100644 --- a/src/services/license_manager_linux_subscriptions.jl +++ b/src/services/license_manager_linux_subscriptions.jl @@ -4,11 +4,97 @@ using AWS.AWSServices: license_manager_linux_subscriptions using AWS.Compat using AWS.UUIDs +""" + deregister_subscription_provider(subscription_provider_arn) + deregister_subscription_provider(subscription_provider_arn, params::Dict{String,<:Any}) + +Remove a third-party subscription provider from the Bring Your Own License (BYOL) +subscriptions registered to your account. + +# Arguments +- `subscription_provider_arn`: The Amazon Resource Name (ARN) of the subscription provider + resource to deregister. 
+ +""" +function deregister_subscription_provider( + SubscriptionProviderArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/DeregisterSubscriptionProvider", + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function deregister_subscription_provider( + SubscriptionProviderArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/DeregisterSubscriptionProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_registered_subscription_provider(subscription_provider_arn) + get_registered_subscription_provider(subscription_provider_arn, params::Dict{String,<:Any}) + +Get details for a Bring Your Own License (BYOL) subscription that's registered to your +account. + +# Arguments +- `subscription_provider_arn`: The Amazon Resource Name (ARN) of the BYOL registration + resource to get details for. + +""" +function get_registered_subscription_provider( + SubscriptionProviderArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/GetRegisteredSubscriptionProvider", + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_registered_subscription_provider( + SubscriptionProviderArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/GetRegisteredSubscriptionProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("SubscriptionProviderArn" => SubscriptionProviderArn), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_service_settings() get_service_settings(params::Dict{String,<:Any}) -Lists the Linux subscriptions service settings. +Lists the Linux subscriptions service settings for your account. """ function get_service_settings(; aws_config::AbstractAWSConfig=global_aws_config()) @@ -40,15 +126,17 @@ subscriptions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"Filters"`: An array of structures that you can use to filter the results to those that - match one or more sets of key-value pairs that you specify. For example, you can filter by - the name of AmiID with an optional operator to see subscriptions that match, partially - match, or don't match a certain Amazon Machine Image (AMI) ID. The valid names for this - filter are: AmiID InstanceID AccountID Status Region UsageOperation - ProductCode InstanceType The valid Operators for this filter are: contains - equals Notequal -- `"MaxResults"`: Maximum number of results to return in a single call. -- `"NextToken"`: Token for the next set of results. +- `"Filters"`: An array of structures that you can use to filter the results by your + specified criteria. For example, you can specify Region in the Name, with the contains + operator to list all subscriptions that match a partial string in the Value, such as + us-west. 
For each filter, you can specify one of the following values for the Name key to + streamline results: AccountID AmiID DualSubscription InstanceID + InstanceType ProductCode Region Status UsageOperation For each filter, + you can use one of the following Operator values to define the behavior of the filter: + contains equals Notequal +- `"MaxResults"`: The maximum items to return in a request. +- `"NextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. """ function list_linux_subscription_instances(; aws_config::AbstractAWSConfig=global_aws_config() @@ -88,8 +176,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys partially match, or don't match a certain subscription's name. The valid names for this filter are: Subscription The valid Operators for this filter are: contains equals Notequal -- `"MaxResults"`: Maximum number of results to return in a single call. -- `"NextToken"`: Token for the next set of results. +- `"MaxResults"`: The maximum items to return in a request. +- `"NextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. """ function list_linux_subscriptions(; aws_config::AbstractAWSConfig=global_aws_config()) return license_manager_linux_subscriptions( @@ -111,6 +200,208 @@ function list_linux_subscriptions( ) end +""" + list_registered_subscription_providers() + list_registered_subscription_providers(params::Dict{String,<:Any}) + +List Bring Your Own License (BYOL) subscription registration resources for your account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"MaxResults"`: The maximum items to return in a request. +- `"NextToken"`: A token to specify where to start paginating. This is the nextToken from a + previously truncated response. +- `"SubscriptionProviderSources"`: To filter your results, specify which subscription + providers to return in the list. +""" +function list_registered_subscription_providers(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/ListRegisteredSubscriptionProviders"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_registered_subscription_providers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/ListRegisteredSubscriptionProviders", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +List the metadata tags that are assigned to the specified Amazon Web Services resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which to list metadata + tags. 
+ +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "GET", + "/tags/$(resourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "GET", + "/tags/$(resourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + register_subscription_provider(secret_arn, subscription_provider_source) + register_subscription_provider(secret_arn, subscription_provider_source, params::Dict{String,<:Any}) + +Register the supported third-party subscription provider for your Bring Your Own License +(BYOL) subscription. + +# Arguments +- `secret_arn`: The Amazon Resource Name (ARN) of the secret where you've stored your + subscription provider's access token. For RHEL subscriptions managed through the Red Hat + Subscription Manager (RHSM), the secret contains your Red Hat Offline token. +- `subscription_provider_source`: The supported Linux subscription provider to register. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Tags"`: The metadata tags to assign to your registered Linux subscription provider + resource. +""" +function register_subscription_provider( + SecretArn, SubscriptionProviderSource; aws_config::AbstractAWSConfig=global_aws_config() +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/RegisterSubscriptionProvider", + Dict{String,Any}( + "SecretArn" => SecretArn, + "SubscriptionProviderSource" => SubscriptionProviderSource, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function register_subscription_provider( + SecretArn, + SubscriptionProviderSource, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return license_manager_linux_subscriptions( + "POST", + "/subscription/RegisterSubscriptionProvider", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "SecretArn" => SecretArn, + "SubscriptionProviderSource" => SubscriptionProviderSource, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Add metadata tags to the specified Amazon Web Services resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Web Services resource to + which to add the specified metadata tags. +- `tags`: The metadata tags to assign to the Amazon Web Services resource. Tags are + formatted as key value pairs. 
+
+"""
+function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config())
+    return license_manager_linux_subscriptions(
+        "PUT",
+        "/tags/$(resourceArn)",
+        Dict{String,Any}("tags" => tags);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function tag_resource(
+    resourceArn,
+    tags,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return license_manager_linux_subscriptions(
+        "PUT",
+        "/tags/$(resourceArn)",
+        Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    untag_resource(resource_arn, tag_keys)
+    untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any})
+
+Remove one or more metadata tags from the specified Amazon Web Services resource.
+
+# Arguments
+- `resource_arn`: The Amazon Resource Name (ARN) of the Amazon Web Services resource to
+  remove the metadata tags from.
+- `tag_keys`: A list of metadata tag keys to remove from the requested resource.
+
+"""
+function untag_resource(
+    resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return license_manager_linux_subscriptions(
+        "DELETE",
+        "/tags/$(resourceArn)",
+        Dict{String,Any}("tagKeys" => tagKeys);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function untag_resource(
+    resourceArn,
+    tagKeys,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return license_manager_linux_subscriptions(
+        "DELETE",
+        "/tags/$(resourceArn)",
+        Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
"""
    update_service_settings(linux_subscriptions_discovery, linux_subscriptions_discovery_settings)
    update_service_settings(linux_subscriptions_discovery, linux_subscriptions_discovery_settings, params::Dict{String,<:Any})
diff --git a/src/services/mediaconnect.jl b/src/services/mediaconnect.jl
index f286958480..a0afb675ce 100644
--- a/src/services/mediaconnect.jl
+++ b/src/services/mediaconnect.jl
@@ -311,6 +311,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
- `"outputs"`: The outputs that you want to add to this flow.
- `"source"`:
- `"sourceFailoverConfig"`:
+- `"sourceMonitoringConfig"`:
- `"sources"`:
- `"vpcInterfaces"`: The VPC interfaces you want on the flow.
"""
@@ -622,6 +623,38 @@ function describe_flow_source_metadata(
    )
end

+"""
+    describe_flow_source_thumbnail(flow_arn)
+    describe_flow_source_thumbnail(flow_arn, params::Dict{String,<:Any})
+
+Displays the thumbnail details of a flow's source stream.
+
+# Arguments
+- `flow_arn`: The Amazon Resource Name (ARN) of the flow.
+
+"""
+function describe_flow_source_thumbnail(
+    flowArn; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return mediaconnect(
+        "GET",
+        "/v1/flows/$(flowArn)/source-thumbnail";
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function describe_flow_source_thumbnail(
+    flowArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return mediaconnect(
+        "GET",
+        "/v1/flows/$(flowArn)/source-thumbnail",
+        params;
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
"""
    describe_gateway(gateway_arn)
    describe_gateway(gateway_arn, params::Dict{String,<:Any})
@@ -1715,6 +1748,7 @@ Updates flow

Optional parameters can be passed as a `params::Dict{String,<:Any}`.
Valid keys are: - `"maintenance"`: - `"sourceFailoverConfig"`: +- `"sourceMonitoringConfig"`: """ function update_flow(flowArn; aws_config::AbstractAWSConfig=global_aws_config()) return mediaconnect( @@ -1861,6 +1895,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys represents the minimal potential latency of that connection. The latency of the stream is set to the highest number between the sender’s minimum latency and the receiver’s minimum latency. +- `"outputStatus"`: An indication of whether the output should transmit data or not. If you + don't specify the outputStatus field in your request, MediaConnect leaves the value + unchanged. - `"port"`: The port to use when content is distributed to this output. - `"protocol"`: The protocol to use for the output. - `"remoteId"`: The remote ID for the Zixi-pull stream. diff --git a/src/services/medialive.jl b/src/services/medialive.jl index 6a03c9a9a0..c8642d1254 100644 --- a/src/services/medialive.jl +++ b/src/services/medialive.jl @@ -543,6 +543,7 @@ exactly two source URLs for redundancy. Only specify sources for PULL type Inputs. Leave Destinations empty. +- `"srtSettings"`: The settings associated with an SRT input. - `"tags"`: A collection of key-value pairs. - `"type"`: - `"vpc"`: @@ -3294,6 +3295,7 @@ exactly two source URLs for redundancy. Only specify sources for PULL type Inputs. Leave Destinations empty. +- `"srtSettings"`: The settings associated with an SRT input. """ function update_input(inputId; aws_config::AbstractAWSConfig=global_aws_config()) return medialive( @@ -3406,6 +3408,7 @@ Updates a multiplex. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"multiplexSettings"`: The new settings for a multiplex. - `"name"`: Name of the multiplex. +- `"packetIdentifiersMapping"`: """ function update_multiplex(multiplexId; aws_config::AbstractAWSConfig=global_aws_config()) return medialive( diff --git a/src/services/medical_imaging.jl b/src/services/medical_imaging.jl index 7467de1fc8..fb35fb86a0 100644 --- a/src/services/medical_imaging.jl +++ b/src/services/medical_imaging.jl @@ -15,6 +15,11 @@ Copy an image set. - `datastore_id`: The data store identifier. - `source_image_set_id`: The source image set identifier. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"force"`: Setting this flag will force the CopyImageSet operation, even if Patient, + Study, or Series level metadata are mismatched across the sourceImageSet and + destinationImageSet. """ function copy_image_set( copyImageSetInformation, @@ -705,6 +710,12 @@ Update image set metadata attributes. - `latest_version`: The latest image set version identifier. - `update_image_set_metadata_updates`: Update image set metadata updates. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"force"`: Setting this flag will force the UpdateImageSetMetadata operation for the + following attributes: Tag.StudyInstanceUID, Tag.SeriesInstanceUID, Tag.SOPInstanceUID, + and Tag.StudyID Adding, removing, or updating private tags for an individual SOP + Instance """ function update_image_set_metadata( datastoreId, diff --git a/src/services/memorydb.jl b/src/services/memorydb.jl index a2874c1158..c3d5532a81 100644 --- a/src/services/memorydb.jl +++ b/src/services/memorydb.jl @@ -158,7 +158,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys the r6gd node type. 
This parameter must be set when using r6gd nodes. For more
  information, see Data tiering.
- `"Description"`: An optional description of the cluster.
-- `"EngineVersion"`: The version number of the Redis engine to be used for the cluster.
+- `"EngineVersion"`: The version number of the Redis OSS engine to be used for the cluster.
- `"KmsKeyId"`: The ID of the KMS key used to encrypt the cluster.
- `"MaintenanceWindow"`: Specifies the weekly time range during which maintenance on the
  cluster is performed. It is specified as a range in the format ddd:hh24:mi-ddd:hh24:mi (24H
@@ -473,7 +473,9 @@ end
    delete_cluster(cluster_name)
    delete_cluster(cluster_name, params::Dict{String,<:Any})

-Deletes a cluster. It also deletes all associated nodes and node endpoints
+Deletes a cluster. It also deletes all associated nodes and node endpoints. CreateSnapshot
+permission is required to create a final snapshot. Without this permission, the API call
+will fail with an Access Denied exception.

# Arguments
- `cluster_name`: The name of the cluster to be deleted
@@ -718,13 +720,13 @@ end
    describe_engine_versions()
    describe_engine_versions(params::Dict{String,<:Any})

-Returns a list of the available Redis engine versions.
+Returns a list of the available Redis OSS engine versions.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"DefaultOnly"`: If true, specifies that only the default version of the specified engine
  or engine and major version combination is to be returned.
-- `"EngineVersion"`: The Redis engine version
+- `"EngineVersion"`: The Redis OSS engine version
- `"MaxResults"`: The maximum number of records to include in the response. If more records
  exist than the specified MaxResults value, a token is included in the response so that the
  remaining results can be retrieved.
diff --git a/src/services/mobile.jl b/src/services/mobile.jl
deleted file mode 100644
index d348dc1d08..0000000000
--- a/src/services/mobile.jl
+++ /dev/null
@@ -1,299 +0,0 @@
-# This file is auto-generated by AWSMetadata.jl
-using AWS
-using AWS.AWSServices: mobile
-using AWS.Compat
-using AWS.UUIDs
-
-"""
-    create_project()
-    create_project(params::Dict{String,<:Any})
-
- Creates an AWS Mobile Hub project.
-
-# Optional Parameters
-Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
-- `"contents"`: ZIP or YAML file which contains configuration settings to be used when
-  creating the project. This may be the contents of the file downloaded from the URL provided
-  in an export project operation.
-- `"name"`: Name of the project.
-- `"region"`: Default region where project resources should be created.
-- `"snapshotId"`: Unique identifier for an exported snapshot of project configuration.
-  This snapshot identifier is included in the share URL when a project is exported.
-"""
-function create_project(; aws_config::AbstractAWSConfig=global_aws_config())
-    return mobile(
-        "POST", "/projects"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
-    )
-end
-function create_project(
-    params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
-)
-    return mobile(
-        "POST", "/projects", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET
-    )
-end
-
-"""
-    delete_project(project_id)
-    delete_project(project_id, params::Dict{String,<:Any})
-
- Delets a project in AWS Mobile Hub.
-
-# Arguments
-- `project_id`: Unique project identifier.
- -""" -function delete_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "DELETE", - "/projects/$(projectId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function delete_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "DELETE", - "/projects/$(projectId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - describe_bundle(bundle_id) - describe_bundle(bundle_id, params::Dict{String,<:Any}) - - Get the bundle details for the requested bundle id. - -# Arguments -- `bundle_id`: Unique bundle identifier. - -""" -function describe_bundle(bundleId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "GET", - "/bundles/$(bundleId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function describe_bundle( - bundleId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "GET", - "/bundles/$(bundleId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - describe_project(project_id) - describe_project(project_id, params::Dict{String,<:Any}) - - Gets details about a project in AWS Mobile Hub. - -# Arguments -- `project_id`: Unique project identifier. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"syncFromResources"`: If set to true, causes AWS Mobile Hub to synchronize information - from other services, e.g., update state of AWS CloudFormation stacks in the AWS Mobile Hub - project. -""" -function describe_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "GET", - "/project", - Dict{String,Any}("projectId" => projectId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function describe_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "GET", - "/project", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("projectId" => projectId), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - export_bundle(bundle_id) - export_bundle(bundle_id, params::Dict{String,<:Any}) - - Generates customized software development kit (SDK) and or tool packages used to integrate -mobile web or mobile app clients with backend AWS resources. - -# Arguments -- `bundle_id`: Unique bundle identifier. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"platform"`: Developer desktop or target application platform. -- `"projectId"`: Unique project identifier. -""" -function export_bundle(bundleId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "POST", - "/bundles/$(bundleId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function export_bundle( - bundleId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "POST", - "/bundles/$(bundleId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - export_project(project_id) - export_project(project_id, params::Dict{String,<:Any}) - - Exports project configuration to a snapshot which can be downloaded and shared. 
Note that -mobile app push credentials are encrypted in exported projects, so they can only be shared -successfully within the same AWS account. - -# Arguments -- `project_id`: Unique project identifier. - -""" -function export_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "POST", - "/exports/$(projectId)"; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function export_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "POST", - "/exports/$(projectId)", - params; - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end - -""" - list_bundles() - list_bundles(params::Dict{String,<:Any}) - - List all available bundles. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Maximum number of records to list in a single response. -- `"nextToken"`: Pagination token. Set to null to start listing bundles from start. If - non-null pagination token is returned in a result, then pass its value in here in another - request to list more bundles. -""" -function list_bundles(; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile("GET", "/bundles"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) -end -function list_bundles( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return mobile( - "GET", "/bundles", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - list_projects() - list_projects(params::Dict{String,<:Any}) - - Lists projects in AWS Mobile Hub. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Maximum number of records to list in a single response. -- `"nextToken"`: Pagination token. Set to null to start listing projects from start. If - non-null pagination token is returned in a result, then pass its value in here in another - request to list more projects. -""" -function list_projects(; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "GET", "/projects"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end -function list_projects( - params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() -) - return mobile( - "GET", "/projects", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET - ) -end - -""" - update_project(project_id) - update_project(project_id, params::Dict{String,<:Any}) - - Update an existing project. - -# Arguments -- `project_id`: Unique project identifier. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"contents"`: ZIP or YAML file which contains project configuration to be updated. This - should be the contents of the file downloaded from the URL provided in an export project - operation. 
-""" -function update_project(projectId; aws_config::AbstractAWSConfig=global_aws_config()) - return mobile( - "POST", - "/update", - Dict{String,Any}("projectId" => projectId); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end -function update_project( - projectId, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), -) - return mobile( - "POST", - "/update", - Dict{String,Any}( - mergewith(_merge, Dict{String,Any}("projectId" => projectId), params) - ); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, - ) -end diff --git a/src/services/mq.jl b/src/services/mq.jl index 69f6b4e24a..8d8ef4ffb5 100644 --- a/src/services/mq.jl +++ b/src/services/mq.jl @@ -5,8 +5,8 @@ using AWS.Compat using AWS.UUIDs """ - create_broker(auto_minor_version_upgrade, broker_name, deployment_mode, engine_type, engine_version, host_instance_type, publicly_accessible, users) - create_broker(auto_minor_version_upgrade, broker_name, deployment_mode, engine_type, engine_version, host_instance_type, publicly_accessible, users, params::Dict{String,<:Any}) + create_broker(broker_name, deployment_mode, engine_type, host_instance_type, publicly_accessible, users) + create_broker(broker_name, deployment_mode, engine_type, host_instance_type, publicly_accessible, users, params::Dict{String,<:Any}) Creates a broker. Note: This API is asynchronous. To create a broker, you must either use the AmazonMQFullAccess IAM policy or include the following EC2 permissions in your IAM @@ -21,10 +21,6 @@ Your Amazon Web Services Credentials and Never Modify or Delete the Amazon MQ El Network Interface in the Amazon MQ Developer Guide. # Arguments -- `auto_minor_version_upgrade`: Enables automatic upgrades to new minor versions for - brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur - during the scheduled maintenance window of the broker or after a manual broker reboot. Set - to true by default, if no value is specified. - `broker_name`: Required. The broker's name. This value must be unique in your Amazon Web Services account, 1-50 characters long, must contain only letters, numbers, dashes, and underscores, and must not contain white spaces, brackets, wildcard characters, or special @@ -35,8 +31,6 @@ Network Interface in the Amazon MQ Developer Guide. - `deployment_mode`: Required. The broker's deployment mode. - `engine_type`: Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ. -- `engine_version`: Required. The broker engine's version. For a list of supported engine - versions, see Supported engines. - `host_instance_type`: Required. The broker's instance type. - `publicly_accessible`: Enables connections from applications outside of the VPC that hosts the broker's subnets. Set to false by default, if no value is provided. @@ -49,6 +43,11 @@ Network Interface in the Amazon MQ Developer Guide. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy used to secure the broker. The default is SIMPLE. +- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new patch versions for brokers + as new versions are released and supported by Amazon MQ. Automatic upgrades occur during + the scheduled maintenance window or after a manual broker reboot. Set to true by default, + if no value is specified. 
Must be set to true for ActiveMQ brokers version 5.18 and above + and for RabbitMQ brokers version 3.13 and above. - `"configuration"`: A list of information about the configuration. - `"creatorRequestId"`: The unique ID that the requester receives for the created broker. Amazon MQ passes your ID with the API action. We recommend using a Universally Unique @@ -59,6 +58,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys that is used to replicate data from in a data replication pair, and is applied to the replica broker. Must be set when dataReplicationMode is set to CRDR. - `"encryptionOptions"`: Encryption options for the broker. +- `"engineVersion"`: The broker engine version. Defaults to the latest available version + for the specified broker engine type. For more information, see the ActiveMQ version + management and the RabbitMQ version management sections in the Amazon MQ Developer Guide. - `"ldapServerMetadata"`: Optional. The metadata of the LDAP server used to authenticate and authorize connections to the broker. Does not apply to RabbitMQ brokers. - `"logs"`: Enables Amazon CloudWatch logging for brokers. @@ -81,11 +83,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"tags"`: Create tags when creating the broker. """ function create_broker( - autoMinorVersionUpgrade, brokerName, deploymentMode, engineType, - engineVersion, hostInstanceType, publiclyAccessible, users; @@ -95,11 +95,9 @@ function create_broker( "POST", "/v1/brokers", Dict{String,Any}( - "autoMinorVersionUpgrade" => autoMinorVersionUpgrade, "brokerName" => brokerName, "deploymentMode" => deploymentMode, "engineType" => engineType, - "engineVersion" => engineVersion, "hostInstanceType" => hostInstanceType, "publiclyAccessible" => publiclyAccessible, "users" => users, @@ -110,11 +108,9 @@ function create_broker( ) end function create_broker( - autoMinorVersionUpgrade, brokerName, deploymentMode, engineType, - engineVersion, hostInstanceType, publiclyAccessible, users, @@ -128,11 +124,9 @@ function create_broker( mergewith( _merge, Dict{String,Any}( - "autoMinorVersionUpgrade" => autoMinorVersionUpgrade, "brokerName" => brokerName, "deploymentMode" => deploymentMode, "engineType" => engineType, - "engineVersion" => engineVersion, "hostInstanceType" => hostInstanceType, "publiclyAccessible" => publiclyAccessible, "users" => users, @@ -147,8 +141,8 @@ function create_broker( end """ - create_configuration(engine_type, engine_version, name) - create_configuration(engine_type, engine_version, name, params::Dict{String,<:Any}) + create_configuration(engine_type, name) + create_configuration(engine_type, name, params::Dict{String,<:Any}) Creates a new configuration for the specified configuration name. Amazon MQ uses the default configuration (the engine type and version). @@ -156,8 +150,6 @@ default configuration (the engine type and version). # Arguments - `engine_type`: Required. The type of broker engine. Currently, Amazon MQ supports ACTIVEMQ and RABBITMQ. -- `engine_version`: Required. The broker engine's version. For a list of supported engine - versions, see Supported engines. - `name`: Required. The name of the configuration. This value can contain only alphanumeric characters, dashes, periods, underscores, and tildes (- . _ ~). This value must be 1-150 characters long. @@ -166,24 +158,24 @@ default configuration (the engine type and version). Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy associated with the configuration. The default is SIMPLE. +- `"engineVersion"`: The broker engine version. Defaults to the latest available version + for the specified broker engine type. For more information, see the ActiveMQ version + management and the RabbitMQ version management sections in the Amazon MQ Developer Guide. - `"tags"`: Create tags when creating the configuration. """ function create_configuration( - engineType, engineVersion, name; aws_config::AbstractAWSConfig=global_aws_config() + engineType, name; aws_config::AbstractAWSConfig=global_aws_config() ) return mq( "POST", "/v1/configurations", - Dict{String,Any}( - "engineType" => engineType, "engineVersion" => engineVersion, "name" => name - ); + Dict{String,Any}("engineType" => engineType, "name" => name); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function create_configuration( engineType, - engineVersion, name, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -193,13 +185,7 @@ function create_configuration( "/v1/configurations", Dict{String,Any}( mergewith( - _merge, - Dict{String,Any}( - "engineType" => engineType, - "engineVersion" => engineVersion, - "name" => name, - ), - params, + _merge, Dict{String,Any}("engineType" => engineType, "name" => name), params ), ); aws_config=aws_config, @@ -861,13 +847,16 @@ Adds a pending configuration change to a broker. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authenticationStrategy"`: Optional. The authentication strategy used to secure the broker. The default is SIMPLE. -- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new minor versions for - brokers, as new versions are released and supported by Amazon MQ. Automatic upgrades occur - during the scheduled maintenance window of the broker or after a manual broker reboot. +- `"autoMinorVersionUpgrade"`: Enables automatic upgrades to new patch versions for brokers + as new versions are released and supported by Amazon MQ. Automatic upgrades occur during + the scheduled maintenance window or after a manual broker reboot. Must be set to true for + ActiveMQ brokers version 5.18 and above and for RabbitMQ brokers version 3.13 and above. - `"configuration"`: A list of information about the configuration. - `"dataReplicationMode"`: Defines whether this broker is a part of a data replication pair. -- `"engineVersion"`: The broker engine version. For a list of supported engine versions, - see Supported engines. +- `"engineVersion"`: The broker engine version. For more information, see the ActiveMQ + version management and the RabbitMQ version management sections in the Amazon MQ Developer + Guide. When upgrading to ActiveMQ version 5.18 and above or RabbitMQ version 3.13 and + above, you must have autoMinorVersionUpgrade set to true for the broker. - `"hostInstanceType"`: The broker's host instance type to upgrade to. For a list of supported instance types, see Broker instance types. - `"ldapServerMetadata"`: Optional. 
The metadata of the LDAP server used to authenticate
diff --git a/src/services/network_firewall.jl b/src/services/network_firewall.jl
index 332460ed1e..bdfd2e3c2e 100644
--- a/src/services/network_firewall.jl
+++ b/src/services/network_firewall.jl
@@ -381,17 +381,21 @@ end
    create_tlsinspection_configuration(tlsinspection_configuration, tlsinspection_configuration_name)
    create_tlsinspection_configuration(tlsinspection_configuration, tlsinspection_configuration_name, params::Dict{String,<:Any})

-Creates an Network Firewall TLS inspection configuration. A TLS inspection configuration
-contains Certificate Manager certificate associations between and the scope configurations
-that Network Firewall uses to decrypt and re-encrypt traffic traveling through your
-firewall. After you create a TLS inspection configuration, you can associate it with a new
-firewall policy. To update the settings for a TLS inspection configuration, use
-UpdateTLSInspectionConfiguration. To manage a TLS inspection configuration's tags, use the
-standard Amazon Web Services resource tagging operations, ListTagsForResource, TagResource,
-and UntagResource. To retrieve information about TLS inspection configurations, use
-ListTLSInspectionConfigurations and DescribeTLSInspectionConfiguration. For more
-information about TLS inspection configurations, see Inspecting SSL/TLS traffic with TLS
-inspection configurations in the Network Firewall Developer Guide.
+Creates a Network Firewall TLS inspection configuration. Network Firewall uses TLS
+inspection configurations to decrypt your firewall's inbound and outbound SSL/TLS traffic.
+After decryption, Network Firewall inspects the traffic according to your firewall policy's
+stateful rules, and then re-encrypts it before sending it to its destination. You can
+enable inspection of your firewall's inbound traffic, outbound traffic, or both. To use TLS
+inspection with your firewall, you must first import or provision certificates using ACM,
+create a TLS inspection configuration, add that configuration to a new firewall policy, and
+then associate that policy with your firewall. To update the settings for a TLS inspection
+configuration, use UpdateTLSInspectionConfiguration. To manage a TLS inspection
+configuration's tags, use the standard Amazon Web Services resource tagging operations,
+ListTagsForResource, TagResource, and UntagResource. To retrieve information about TLS
+inspection configurations, use ListTLSInspectionConfigurations and
+DescribeTLSInspectionConfiguration. For more information about TLS inspection
+configurations, see Inspecting SSL/TLS traffic with TLS inspection configurations in the
+Network Firewall Developer Guide.

# Arguments
- `tlsinspection_configuration`: The object that defines a TLS inspection configuration.
When you create an account in an organization using the Organizations console, API, or CLI commands, the information required for the account to operate as a standalone account, such -as a payment method and signing the end user license agreement (EULA) is not automatically -collected. If you must remove an account from your organization later, you can do so only -after you provide the missing information. For more information, see Considerations before -removing an account from an organization in the Organizations User Guide. If you get an -exception that indicates that you exceeded your account limits for the organization, -contact Amazon Web Services Support. If you get an exception that indicates that the -operation failed because your organization is still initializing, wait one hour and then -try again. If the error persists, contact Amazon Web Services Support. Using -CreateAccount to create multiple temporary accounts isn't recommended. You can only close -an account from the Billing and Cost Management console, and you must be signed in as the -root user. For information on the requirements and process for closing an account, see -Closing a member account in your organization in the Organizations User Guide. When you -create a member account with this operation, you can choose whether to create the account -with the IAM User and Role Access to Billing Information switch enabled. If you enable it, -IAM users and roles that have appropriate permissions can view billing information for the -account. If you disable it, only the account root user can access billing information. For -information about how to disable this switch for an account, see Granting access to your -billing information and tools. +as a payment method is not automatically collected. If you must remove an account from your +organization later, you can do so only after you provide the missing information. For more +information, see Considerations before removing an account from an organization in the +Organizations User Guide. If you get an exception that indicates that you exceeded your +account limits for the organization, contact Amazon Web Services Support. If you get an +exception that indicates that the operation failed because your organization is still +initializing, wait one hour and then try again. If the error persists, contact Amazon Web +Services Support. Using CreateAccount to create multiple temporary accounts isn't +recommended. You can only close an account from the Billing and Cost Management console, +and you must be signed in as the root user. For information on the requirements and process +for closing an account, see Closing a member account in your organization in the +Organizations User Guide. When you create a member account with this operation, you can +choose whether to create the account with the IAM User and Role Access to Billing +Information switch enabled. If you enable it, IAM users and roles that have appropriate +permissions can view billing information for the account. If you disable it, only the +account root user can access billing information. For information about how to disable this +switch for an account, see Granting access to your billing information and tools. # Arguments - `account_name`: The friendly name of the member account. 
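
A hedged illustration of the billing-access switch described above, assuming the generated
two-argument `create_account(account_name, email)` form; the name and email are
placeholders, and the `IamUserAccessToBilling` key with its `ALLOW` value is taken from the
Organizations API rather than from the hunk above:

    using AWS: @service
    @service Organizations

    Organizations.create_account(
        "Example Member",                           # friendly name of the member account
        "member@example.com",                       # email for the new account's root user
        Dict("IamUserAccessToBilling" => "ALLOW"),  # IAM principals may view billing data
    )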
diff --git a/src/services/payment_cryptography_data.jl b/src/services/payment_cryptography_data.jl index cd9714f20c..08697fd465 100644 --- a/src/services/payment_cryptography_data.jl +++ b/src/services/payment_cryptography_data.jl @@ -32,8 +32,14 @@ operations: EncryptData GetPublicCertificate ImportKey - `cipher_text`: The ciphertext to decrypt. - `decryption_attributes`: The encryption key type and attributes for ciphertext decryption. - `key_identifier`: The keyARN of the encryption key that Amazon Web Services Payment - Cryptography uses for ciphertext decryption. + Cryptography uses for ciphertext decryption. When a WrappedKeyBlock is provided, this value + will be the identifier to the key wrapping key. Otherwise, it is the key identifier used to + perform the operation. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"WrappedKey"`: The WrappedKeyBlock containing the encryption key for ciphertext + decryption. """ function decrypt_data( CipherText, @@ -106,12 +112,18 @@ ImportKey ReEncryptData # Arguments - `encryption_attributes`: The encryption key type and attributes for plaintext encryption. - `key_identifier`: The keyARN of the encryption key that Amazon Web Services Payment - Cryptography uses for plaintext encryption. + Cryptography uses for plaintext encryption. When a WrappedKeyBlock is provided, this value + will be the identifier to the key wrapping key. Otherwise, it is the key identifier used to + perform the operation. - `plain_text`: The plaintext to be encrypted. For encryption using asymmetric keys, plaintext data length is constrained by encryption key strength that you define in KeyAlgorithm and padding type that you define in AsymmetricEncryptionAttributes. For more information, see Encrypt data in the Amazon Web Services Payment Cryptography User Guide. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"WrappedKey"`: The WrappedKeyBlock containing the encryption key for plaintext + encryption. """ function encrypt_data( EncryptionAttributes, @@ -396,32 +408,37 @@ end re_encrypt_data(cipher_text, incoming_encryption_attributes, incoming_key_identifier, outgoing_encryption_attributes, outgoing_key_identifier) re_encrypt_data(cipher_text, incoming_encryption_attributes, incoming_key_identifier, outgoing_encryption_attributes, outgoing_key_identifier, params::Dict{String,<:Any}) -Re-encrypt ciphertext using DUKPT, Symmetric and Asymmetric Data Encryption Keys. You can -either generate an encryption key within Amazon Web Services Payment Cryptography by -calling CreateKey or import your own encryption key by calling ImportKey. The KeyArn for -use with this operation must be in a compatible key state with KeyModesOfUse set to -Encrypt. In asymmetric encryption, ciphertext is encrypted using public component (imported -by calling ImportKey) of the asymmetric key pair created outside of Amazon Web Services -Payment Cryptography. For symmetric and DUKPT encryption, Amazon Web Services Payment -Cryptography supports TDES and AES algorithms. For asymmetric encryption, Amazon Web -Services Payment Cryptography supports RSA. To encrypt using DUKPT, a DUKPT key must -already exist within your account with KeyModesOfUse set to DeriveKey or a new DUKPT can be -generated by calling CreateKey. 
For information about valid keys for this operation, see -Understanding key attributes and Key types for specific data operations in the Amazon Web -Services Payment Cryptography User Guide. Cross-account use: This operation can't be used -across different Amazon Web Services accounts. Related operations: DecryptData -EncryptData GetPublicCertificate ImportKey +Re-encrypt ciphertext using DUKPT or Symmetric data encryption keys. You can either +generate an encryption key within Amazon Web Services Payment Cryptography by calling +CreateKey or import your own encryption key by calling ImportKey. The KeyArn for use with +this operation must be in a compatible key state with KeyModesOfUse set to Encrypt. For +symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and +AES algorithms. To encrypt using DUKPT, a DUKPT key must already exist within your account +with KeyModesOfUse set to DeriveKey or a new DUKPT can be generated by calling CreateKey. +For information about valid keys for this operation, see Understanding key attributes and +Key types for specific data operations in the Amazon Web Services Payment Cryptography User +Guide. Cross-account use: This operation can't be used across different Amazon Web +Services accounts. Related operations: DecryptData EncryptData +GetPublicCertificate ImportKey # Arguments - `cipher_text`: Ciphertext to be encrypted. The minimum allowed length is 16 bytes and maximum allowed length is 4096 bytes. - `incoming_encryption_attributes`: The attributes and values for incoming ciphertext. - `incoming_key_identifier`: The keyARN of the encryption key of incoming ciphertext data. + When a WrappedKeyBlock is provided, this value will be the identifier to the key wrapping + key. Otherwise, it is the key identifier used to perform the operation. - `outgoing_encryption_attributes`: The attributes and values for outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography. - `outgoing_key_identifier`: The keyARN of the encryption key of outgoing ciphertext data after encryption by Amazon Web Services Payment Cryptography. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"IncomingWrappedKey"`: The WrappedKeyBlock containing the encryption key of incoming + ciphertext data. +- `"OutgoingWrappedKey"`: The WrappedKeyBlock containing the encryption key of outgoing + ciphertext data after encryption by Amazon Web Services Payment Cryptography. """ function re_encrypt_data( CipherText, @@ -500,7 +517,9 @@ operations: GeneratePinData VerifyPinData - `encrypted_pin_block`: The encrypted PIN block data that Amazon Web Services Payment Cryptography translates. - `incoming_key_identifier`: The keyARN of the encryption key under which incoming PIN - block data is encrypted. This key type can be PEK or BDK. + block data is encrypted. This key type can be PEK or BDK. When a WrappedKeyBlock is + provided, this value will be the identifier to the key wrapping key for PIN block. + Otherwise, it is the key identifier used to perform the operation. - `incoming_translation_attributes`: The format of the incoming PIN block data for translation within Amazon Web Services Payment Cryptography. - `outgoing_key_identifier`: The keyARN of the encryption key for encrypting outgoing PIN @@ -512,8 +531,12 @@ operations: GeneratePinData VerifyPinData Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: - `"IncomingDukptAttributes"`: The attributes and values to use for incoming DUKPT encryption key for PIN block translation. +- `"IncomingWrappedKey"`: The WrappedKeyBlock containing the encryption key under which + incoming PIN block data is encrypted. - `"OutgoingDukptAttributes"`: The attributes and values to use for outgoing DUKPT encryption key after PIN block translation. +- `"OutgoingWrappedKey"`: The WrappedKeyBlock containing the encryption key for encrypting + outgoing PIN block data. """ function translate_pin_data( EncryptedPinBlock, diff --git a/src/services/pcs.jl b/src/services/pcs.jl new file mode 100644 index 0000000000..8b955cc6fd --- /dev/null +++ b/src/services/pcs.jl @@ -0,0 +1,991 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: pcs +using AWS.Compat +using AWS.UUIDs + +""" + create_cluster(cluster_name, networking, scheduler, size) + create_cluster(cluster_name, networking, scheduler, size, params::Dict{String,<:Any}) + +Creates a cluster in your account. Amazon Web Services PCS creates the cluster controller +in a service-owned account. The cluster controller communicates with the cluster resources +in your account. The subnets and security groups for the cluster must already exist before +you use this API action. It takes time for Amazon Web Services PCS to create the cluster. +The cluster is in a Creating state until it is ready to use. There can only be 1 cluster in +a Creating state per Amazon Web Services Region per Amazon Web Services account. +CreateCluster fails with a ServiceQuotaExceededException if there is already a cluster in a +Creating state. + +# Arguments +- `cluster_name`: A name to identify the cluster. Example: MyCluster +- `networking`: The networking configuration used to set up the cluster's control plane. +- `scheduler`: The cluster management and job scheduling software associated with the + cluster. +- `size`: A value that determines the maximum number of compute nodes in the cluster and + the maximum number of jobs (active and queued). SMALL: 32 compute nodes and 256 jobs + MEDIUM: 512 compute nodes and 8192 jobs LARGE: 2048 compute nodes and 16,384 jobs + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. Idempotency ensures that an API request completes only once. + With an idempotent request, if the original request completes successfully, the subsequent + retries with the same client token return the result from the original successful request + and they have no additional effect. If you don't specify a client token, the CLI and SDK + automatically generate 1 for you. +- `"slurmConfiguration"`: Additional options related to the Slurm scheduler. +- `"tags"`: 1 or more tags added to the resource. Each tag consists of a tag key and tag + value. The tag value is optional and can be an empty string. 
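+
+# Example
+A minimal, hedged sketch rather than generated reference material: it assumes the subnet
+and security group already exist, and the `networking` and `scheduler` dictionary shapes
+(and the Slurm version shown) are illustrative placeholders.
+
+    using AWS: @service
+    @service Pcs
+
+    Pcs.create_cluster(
+        "MyCluster",
+        Dict("subnetIds" => ["subnet-0123456789abcdef0"],
+             "securityGroupIds" => ["sg-0123456789abcdef0"]),
+        Dict("type" => "SLURM", "version" => "23.11"),
+        "SMALL",
+    )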
+""" +function create_cluster( + clusterName, + networking, + scheduler, + size; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "CreateCluster", + Dict{String,Any}( + "clusterName" => clusterName, + "networking" => networking, + "scheduler" => scheduler, + "size" => size, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_cluster( + clusterName, + networking, + scheduler, + size, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "CreateCluster", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clusterName" => clusterName, + "networking" => networking, + "scheduler" => scheduler, + "size" => size, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_compute_node_group(cluster_identifier, compute_node_group_name, custom_launch_template, iam_instance_profile_arn, instance_configs, scaling_configuration, subnet_ids) + create_compute_node_group(cluster_identifier, compute_node_group_name, custom_launch_template, iam_instance_profile_arn, instance_configs, scaling_configuration, subnet_ids, params::Dict{String,<:Any}) + +Creates a managed set of compute nodes. You associate a compute node group with a cluster +through 1 or more Amazon Web Services PCS queues or as part of the login fleet. A compute +node group includes the definition of the compute properties and lifecycle management. +Amazon Web Services PCS uses the information you provide to this API action to launch +compute nodes in your account. You can only specify subnets in the same Amazon VPC as your +cluster. You receive billing charges for the compute nodes that Amazon Web Services PCS +launches in your account. You must already have a launch template before you call this API. +For more information, see Launch an instance from a launch template in the Amazon Elastic +Compute Cloud User Guide for Linux Instances. + +# Arguments +- `cluster_identifier`: The name or ID of the cluster to create a compute node group in. +- `compute_node_group_name`: A name to identify the cluster. Example: MyCluster +- `custom_launch_template`: +- `iam_instance_profile_arn`: The Amazon Resource Name (ARN) of the IAM instance profile + used to pass an IAM role when launching EC2 instances. The role contained in your instance + profile must have pcs:RegisterComputeNodeGroupInstance permissions attached in order to + provision instances correctly. The resource identifier of the ARN must start with AWSPCS. + For example, arn:aws:iam:123456789012:instance-profile/AWSPCSMyComputeNodeInstanceProfile. +- `instance_configs`: A list of EC2 instance configurations that Amazon Web Services PCS + can provision in the compute node group. +- `scaling_configuration`: Specifies the boundaries of the compute node group auto scaling. +- `subnet_ids`: The list of subnet IDs where the compute node group launches instances. + Subnets must be in the same VPC as the cluster. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"amiId"`: The ID of the Amazon Machine Image (AMI) that Amazon Web Services PCS uses to + launch compute nodes (Amazon EC2 instances). If you don't provide this value, Amazon Web + Services PCS uses the AMI ID specified in the custom launch template. 
+- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. Idempotency ensures that an API request completes only once. + With an idempotent request, if the original request completes successfully, the subsequent + retries with the same client token return the result from the original successful request + and they have no additional effect. If you don't specify a client token, the CLI and SDK + automatically generate 1 for you. +- `"purchaseOption"`: Specifies how EC2 instances are purchased on your behalf. Amazon Web + Services PCS supports On-Demand and Spot instances. For more information, see Instance + purchasing options in the Amazon Elastic Compute Cloud User Guide. If you don't provide + this option, it defaults to On-Demand. +- `"slurmConfiguration"`: Additional options related to the Slurm scheduler. +- `"spotOptions"`: +- `"tags"`: 1 or more tags added to the resource. Each tag consists of a tag key and tag + value. The tag value is optional and can be an empty string. +""" +function create_compute_node_group( + clusterIdentifier, + computeNodeGroupName, + customLaunchTemplate, + iamInstanceProfileArn, + instanceConfigs, + scalingConfiguration, + subnetIds; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "CreateComputeNodeGroup", + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "computeNodeGroupName" => computeNodeGroupName, + "customLaunchTemplate" => customLaunchTemplate, + "iamInstanceProfileArn" => iamInstanceProfileArn, + "instanceConfigs" => instanceConfigs, + "scalingConfiguration" => scalingConfiguration, + "subnetIds" => subnetIds, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_compute_node_group( + clusterIdentifier, + computeNodeGroupName, + customLaunchTemplate, + iamInstanceProfileArn, + instanceConfigs, + scalingConfiguration, + subnetIds, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "CreateComputeNodeGroup", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "computeNodeGroupName" => computeNodeGroupName, + "customLaunchTemplate" => customLaunchTemplate, + "iamInstanceProfileArn" => iamInstanceProfileArn, + "instanceConfigs" => instanceConfigs, + "scalingConfiguration" => scalingConfiguration, + "subnetIds" => subnetIds, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_queue(cluster_identifier, queue_name) + create_queue(cluster_identifier, queue_name, params::Dict{String,<:Any}) + +Creates a job queue. You must associate 1 or more compute node groups with the queue. You +can associate 1 compute node group with multiple queues. + +# Arguments +- `cluster_identifier`: The name or ID of the cluster for which to create a queue. +- `queue_name`: A name to identify the queue. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. Idempotency ensures that an API request completes only once. + With an idempotent request, if the original request completes successfully, the subsequent + retries with the same client token return the result from the original successful request + and they have no additional effect. 
If you don't specify a client token, the CLI and SDK + automatically generate 1 for you. +- `"computeNodeGroupConfigurations"`: The list of compute node group configurations to + associate with the queue. Queues assign jobs to associated compute node groups. +- `"tags"`: 1 or more tags added to the resource. Each tag consists of a tag key and tag + value. The tag value is optional and can be an empty string. +""" +function create_queue( + clusterIdentifier, queueName; aws_config::AbstractAWSConfig=global_aws_config() +) + return pcs( + "CreateQueue", + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "queueName" => queueName, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_queue( + clusterIdentifier, + queueName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "CreateQueue", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "queueName" => queueName, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_cluster(cluster_identifier) + delete_cluster(cluster_identifier, params::Dict{String,<:Any}) + +Deletes a cluster and all its linked resources. You must delete all queues and compute node +groups associated with the cluster before you can delete the cluster. + +# Arguments +- `cluster_identifier`: The name or ID of the cluster to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. Idempotency ensures that an API request completes only once. + With an idempotent request, if the original request completes successfully, the subsequent + retries with the same client token return the result from the original successful request + and they have no additional effect. If you don't specify a client token, the CLI and SDK + automatically generate 1 for you. +""" +function delete_cluster( + clusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return pcs( + "DeleteCluster", + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, "clientToken" => string(uuid4()) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_cluster( + clusterIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "DeleteCluster", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_compute_node_group(cluster_identifier, compute_node_group_identifier) + delete_compute_node_group(cluster_identifier, compute_node_group_identifier, params::Dict{String,<:Any}) + +Deletes a compute node group. You must delete all queues associated with the compute node +group first. + +# Arguments +- `cluster_identifier`: The name or ID of the cluster of the compute node group. +- `compute_node_group_identifier`: The name or ID of the compute node group to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. Idempotency ensures that an API request completes only once. + With an idempotent request, if the original request completes successfully, the subsequent + retries with the same client token return the result from the original successful request + and they have no additional effect. If you don't specify a client token, the CLI and SDK + automatically generate 1 for you. +""" +function delete_compute_node_group( + clusterIdentifier, + computeNodeGroupIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "DeleteComputeNodeGroup", + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "computeNodeGroupIdentifier" => computeNodeGroupIdentifier, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_compute_node_group( + clusterIdentifier, + computeNodeGroupIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "DeleteComputeNodeGroup", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "computeNodeGroupIdentifier" => computeNodeGroupIdentifier, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_queue(cluster_identifier, queue_identifier) + delete_queue(cluster_identifier, queue_identifier, params::Dict{String,<:Any}) + +Deletes a job queue. If the compute node group associated with this queue isn't associated +with any other queues, Amazon Web Services PCS terminates all the compute nodes for this +queue. + +# Arguments +- `cluster_identifier`: The name or ID of the cluster of the queue. +- `queue_identifier`: The name or ID of the queue to delete. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. Idempotency ensures that an API request completes only once. + With an idempotent request, if the original request completes successfully, the subsequent + retries with the same client token return the result from the original successful request + and they have no additional effect. If you don't specify a client token, the CLI and SDK + automatically generate 1 for you. +""" +function delete_queue( + clusterIdentifier, queueIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return pcs( + "DeleteQueue", + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "queueIdentifier" => queueIdentifier, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_queue( + clusterIdentifier, + queueIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "DeleteQueue", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "queueIdentifier" => queueIdentifier, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_cluster(cluster_identifier) + get_cluster(cluster_identifier, params::Dict{String,<:Any}) + +Returns detailed information about a running cluster in your account. 
This API action
+provides networking information, endpoint information for communication with the scheduler,
+and provisioning status.
+
+# Arguments
+- `cluster_identifier`: The name or ID of the cluster.
+
+"""
+function get_cluster(clusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config())
+    return pcs(
+        "GetCluster",
+        Dict{String,Any}("clusterIdentifier" => clusterIdentifier);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_cluster(
+    clusterIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return pcs(
+        "GetCluster",
+        Dict{String,Any}(
+            mergewith(
+                _merge, Dict{String,Any}("clusterIdentifier" => clusterIdentifier), params
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_compute_node_group(cluster_identifier, compute_node_group_identifier)
+    get_compute_node_group(cluster_identifier, compute_node_group_identifier, params::Dict{String,<:Any})
+
+Returns detailed information about a compute node group. This API action provides
+networking information, EC2 instance type, compute node group status, and scheduler (such
+as Slurm) configuration.
+
+# Arguments
+- `cluster_identifier`: The name or ID of the cluster.
+- `compute_node_group_identifier`: The name or ID of the compute node group.
+
+"""
+function get_compute_node_group(
+    clusterIdentifier,
+    computeNodeGroupIdentifier;
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return pcs(
+        "GetComputeNodeGroup",
+        Dict{String,Any}(
+            "clusterIdentifier" => clusterIdentifier,
+            "computeNodeGroupIdentifier" => computeNodeGroupIdentifier,
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_compute_node_group(
+    clusterIdentifier,
+    computeNodeGroupIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return pcs(
+        "GetComputeNodeGroup",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "clusterIdentifier" => clusterIdentifier,
+                    "computeNodeGroupIdentifier" => computeNodeGroupIdentifier,
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    get_queue(cluster_identifier, queue_identifier)
+    get_queue(cluster_identifier, queue_identifier, params::Dict{String,<:Any})
+
+Returns detailed information about a queue. The information includes the compute node
+groups that the queue uses to schedule jobs.
+
+# Arguments
+- `cluster_identifier`: The name or ID of the cluster of the queue.
+- `queue_identifier`: The name or ID of the queue.
+
+"""
+function get_queue(
+    clusterIdentifier, queueIdentifier; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return pcs(
+        "GetQueue",
+        Dict{String,Any}(
+            "clusterIdentifier" => clusterIdentifier, "queueIdentifier" => queueIdentifier
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function get_queue(
+    clusterIdentifier,
+    queueIdentifier,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return pcs(
+        "GetQueue",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}(
+                    "clusterIdentifier" => clusterIdentifier,
+                    "queueIdentifier" => queueIdentifier,
+                ),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    list_clusters()
+    list_clusters(params::Dict{String,<:Any})
+
+Returns a list of running clusters in your account.
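+
+For example, a hedged pagination sketch (`Pcs` is the module produced by AWS.jl's
+`@service` macro, the `clusters`/`nextToken` response field names are assumptions about
+this operation's output, and `maxResults` and `nextToken` are documented below):
+
+    clusters = Any[]
+    token = nothing
+    while true
+        params = isnothing(token) ? Dict{String,Any}() : Dict{String,Any}("nextToken" => token)
+        resp = Pcs.list_clusters(params)
+        append!(clusters, get(resp, "clusters", []))
+        token = get(resp, "nextToken", nothing)
+        isnothing(token) && break
+    end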
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. The default is 10 results, and the maximum + allowed page size is 100 results. A value of 0 uses the default. +- `"nextToken"`: The value of nextToken is a unique pagination token for each page of + results returned. If nextToken is returned, there are more results available. Make the call + again using the returned token to retrieve the next page. Keep all other arguments + unchanged. Each pagination token expires after 24 hours. Using an expired pagination token + returns an HTTP 400 InvalidToken error. +""" +function list_clusters(; aws_config::AbstractAWSConfig=global_aws_config()) + return pcs("ListClusters"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET) +end +function list_clusters( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return pcs( + "ListClusters", params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end + +""" + list_compute_node_groups(cluster_identifier) + list_compute_node_groups(cluster_identifier, params::Dict{String,<:Any}) + +Returns a list of all compute node groups associated with a cluster. + +# Arguments +- `cluster_identifier`: The name or ID of the cluster to list compute node groups for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. The default is 10 results, and the maximum + allowed page size is 100 results. A value of 0 uses the default. +- `"nextToken"`: The value of nextToken is a unique pagination token for each page of + results returned. If nextToken is returned, there are more results available. Make the call + again using the returned token to retrieve the next page. Keep all other arguments + unchanged. Each pagination token expires after 24 hours. Using an expired pagination token + returns an HTTP 400 InvalidToken error. +""" +function list_compute_node_groups( + clusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return pcs( + "ListComputeNodeGroups", + Dict{String,Any}("clusterIdentifier" => clusterIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_compute_node_groups( + clusterIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "ListComputeNodeGroups", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("clusterIdentifier" => clusterIdentifier), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_queues(cluster_identifier) + list_queues(cluster_identifier, params::Dict{String,<:Any}) + +Returns a list of all queues associated with a cluster. + +# Arguments +- `cluster_identifier`: The name or ID of the cluster to list queues for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results that are returned per call. You can use + nextToken to obtain further pages of results. The default is 10 results, and the maximum + allowed page size is 100 results. A value of 0 uses the default. 
+- `"nextToken"`: The value of nextToken is a unique pagination token for each page of + results returned. If nextToken is returned, there are more results available. Make the call + again using the returned token to retrieve the next page. Keep all other arguments + unchanged. Each pagination token expires after 24 hours. Using an expired pagination token + returns an HTTP 400 InvalidToken error. +""" +function list_queues(clusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config()) + return pcs( + "ListQueues", + Dict{String,Any}("clusterIdentifier" => clusterIdentifier); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_queues( + clusterIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "ListQueues", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("clusterIdentifier" => clusterIdentifier), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Returns a list of all tags on an Amazon Web Services PCS resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource for which to list tags. + +""" +function list_tags_for_resource( + resourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return pcs( + "ListTagsForResource", + Dict{String,Any}("resourceArn" => resourceArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "ListTagsForResource", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("resourceArn" => resourceArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + register_compute_node_group_instance(bootstrap_id, cluster_identifier) + register_compute_node_group_instance(bootstrap_id, cluster_identifier, params::Dict{String,<:Any}) + + This API action isn't intended for you to use. Amazon Web Services PCS uses this API +action to register the compute nodes it launches in your account. + +# Arguments +- `bootstrap_id`: The client-generated token to allow for retries. +- `cluster_identifier`: The name or ID of the cluster to register the compute node group + instance in. + +""" +function register_compute_node_group_instance( + bootstrapId, clusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return pcs( + "RegisterComputeNodeGroupInstance", + Dict{String,Any}( + "bootstrapId" => bootstrapId, "clusterIdentifier" => clusterIdentifier + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function register_compute_node_group_instance( + bootstrapId, + clusterIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "RegisterComputeNodeGroupInstance", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "bootstrapId" => bootstrapId, "clusterIdentifier" => clusterIdentifier + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Adds or edits tags on an Amazon Web Services PCS resource. Each tag consists of a tag key +and a tag value. The tag key and tag value are case-sensitive strings. 
The tag value can be +an empty (null) string. To add a tag, specify a new tag key and a tag value. To edit a tag, +specify an existing tag key and a new tag value. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `tags`: 1 or more tags added to the resource. Each tag consists of a tag key and tag + value. The tag value is optional and can be an empty string. + +""" +function tag_resource(resourceArn, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return pcs( + "TagResource", + Dict{String,Any}("resourceArn" => resourceArn, "tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceArn, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "TagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("resourceArn" => resourceArn, "tags" => tags), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Deletes tags from an Amazon Web Services PCS resource. To delete a tag, specify the tag key +and the Amazon Resource Name (ARN) of the Amazon Web Services PCS resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource. +- `tag_keys`: 1 or more tag keys to remove from the resource. Specify only tag keys and not + tag values. + +""" +function untag_resource( + resourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return pcs( + "UntagResource", + Dict{String,Any}("resourceArn" => resourceArn, "tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "UntagResource", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("resourceArn" => resourceArn, "tagKeys" => tagKeys), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_compute_node_group(cluster_identifier, compute_node_group_identifier) + update_compute_node_group(cluster_identifier, compute_node_group_identifier, params::Dict{String,<:Any}) + +Updates a compute node group. You can update many of the fields related to your compute +node group including the configurations for networking, compute nodes, and settings +specific to your scheduler (such as Slurm). + +# Arguments +- `cluster_identifier`: The name or ID of the cluster of the compute node group. +- `compute_node_group_identifier`: The name or ID of the compute node group. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"amiId"`: The ID of the Amazon Machine Image (AMI) that Amazon Web Services PCS uses to + launch instances. If not provided, Amazon Web Services PCS uses the AMI ID specified in the + custom launch template. +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. Idempotency ensures that an API request completes only once. + With an idempotent request, if the original request completes successfully, the subsequent + retries with the same client token return the result from the original successful request + and they have no additional effect. 
If you don't specify a client token, the CLI and SDK + automatically generate 1 for you. +- `"customLaunchTemplate"`: +- `"iamInstanceProfileArn"`: The Amazon Resource Name (ARN) of the IAM instance profile + used to pass an IAM role when launching EC2 instances. The role contained in your instance + profile must have pcs:RegisterComputeNodeGroupInstance permissions attached to provision + instances correctly. +- `"purchaseOption"`: Specifies how EC2 instances are purchased on your behalf. Amazon Web + Services PCS supports On-Demand and Spot instances. For more information, see Instance + purchasing options in the Amazon Elastic Compute Cloud User Guide. If you don't provide + this option, it defaults to On-Demand. +- `"scalingConfiguration"`: Specifies the boundaries of the compute node group auto scaling. +- `"slurmConfiguration"`: Additional options related to the Slurm scheduler. +- `"spotOptions"`: +- `"subnetIds"`: The list of subnet IDs where the compute node group provisions instances. + The subnets must be in the same VPC as the cluster. +""" +function update_compute_node_group( + clusterIdentifier, + computeNodeGroupIdentifier; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "UpdateComputeNodeGroup", + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "computeNodeGroupIdentifier" => computeNodeGroupIdentifier, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_compute_node_group( + clusterIdentifier, + computeNodeGroupIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "UpdateComputeNodeGroup", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "computeNodeGroupIdentifier" => computeNodeGroupIdentifier, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_queue(cluster_identifier, queue_identifier) + update_queue(cluster_identifier, queue_identifier, params::Dict{String,<:Any}) + +Updates the compute node group configuration of a queue. Use this API to change the compute +node groups that the queue can send jobs to. + +# Arguments +- `cluster_identifier`: The name or ID of the cluster of the queue. +- `queue_identifier`: The name or ID of the queue. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. Idempotency ensures that an API request completes only once. + With an idempotent request, if the original request completes successfully, the subsequent + retries with the same client token return the result from the original successful request + and they have no additional effect. If you don't specify a client token, the CLI and SDK + automatically generate 1 for you. +- `"computeNodeGroupConfigurations"`: The list of compute node group configurations to + associate with the queue. Queues assign jobs to associated compute node groups. 
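+
+# Example
+A hedged sketch of pointing the queue at a different compute node group; the identifier is
+a placeholder, and the configuration shape (`computeNodeGroupId`) is an assumption based on
+the parameter description above.
+
+    Pcs.update_queue(
+        "MyCluster",
+        "MyQueue",
+        Dict("computeNodeGroupConfigurations" =>
+            [Dict("computeNodeGroupId" => "cng-0123456789abcdef0")]),
+    )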
+""" +function update_queue( + clusterIdentifier, queueIdentifier; aws_config::AbstractAWSConfig=global_aws_config() +) + return pcs( + "UpdateQueue", + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "queueIdentifier" => queueIdentifier, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_queue( + clusterIdentifier, + queueIdentifier, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return pcs( + "UpdateQueue", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "clusterIdentifier" => clusterIdentifier, + "queueIdentifier" => queueIdentifier, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/personalize.jl b/src/services/personalize.jl index ed834528be..efeaddd27b 100644 --- a/src/services/personalize.jl +++ b/src/services/personalize.jl @@ -951,34 +951,32 @@ end create_solution(dataset_group_arn, name) create_solution(dataset_group_arn, name, params::Dict{String,<:Any}) - After you create a solution, you can’t change its configuration. By default, all new -solutions use automatic training. With automatic training, you incur training costs while -your solution is active. You can't stop automatic training for a solution. To avoid -unnecessary costs, make sure to delete the solution when you are finished. For information -about training costs, see Amazon Personalize pricing. Creates the configuration for -training a model (creating a solution version). This configuration includes the recipe to -use for model training and optional training configuration, such as columns to use in -training and feature transformation parameters. For more information about configuring a -solution, see Creating and configuring a solution. By default, new solutions use -automatic training to create solution versions every 7 days. You can change the training -frequency. Automatic solution version creation starts one hour after the solution is -ACTIVE. If you manually create a solution version within the hour, the solution skips the -first automatic training. For more information, see Configuring automatic training. To -turn off automatic training, set performAutoTraining to false. If you turn off automatic -training, you must manually create a solution version by calling the CreateSolutionVersion -operation. After training starts, you can get the solution version's Amazon Resource Name -(ARN) with the ListSolutionVersions API operation. To get its status, use the -DescribeSolutionVersion. After training completes you can evaluate model accuracy by -calling GetSolutionMetrics. When you are satisfied with the solution version, you deploy it -using CreateCampaign. The campaign provides recommendations to a client through the -GetRecommendations API. Amazon Personalize doesn't support configuring the hpoObjective -for solution hyperparameter optimization at this time. Status A solution can be in one -of the following states: CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE -FAILED DELETE PENDING > DELETE IN_PROGRESS To get the status of the solution, call -DescribeSolution. If you use manual training, the status must be ACTIVE before you call -CreateSolutionVersion. Related APIs ListSolutions CreateSolutionVersion -DescribeSolution DeleteSolution ListSolutionVersions DescribeSolutionVersion - + By default, all new solutions use automatic training. 
With automatic training, you incur +training costs while your solution is active. To avoid unnecessary costs, when you are +finished you can update the solution to turn off automatic training. For information about +training costs, see Amazon Personalize pricing. Creates the configuration for training a +model (creating a solution version). This configuration includes the recipe to use for +model training and optional training configuration, such as columns to use in training and +feature transformation parameters. For more information about configuring a solution, see +Creating and configuring a solution. By default, new solutions use automatic training to +create solution versions every 7 days. You can change the training frequency. Automatic +solution version creation starts within one hour after the solution is ACTIVE. If you +manually create a solution version within the hour, the solution skips the first automatic +training. For more information, see Configuring automatic training. To turn off automatic +training, set performAutoTraining to false. If you turn off automatic training, you must +manually create a solution version by calling the CreateSolutionVersion operation. After +training starts, you can get the solution version's Amazon Resource Name (ARN) with the +ListSolutionVersions API operation. To get its status, use the DescribeSolutionVersion. +After training completes you can evaluate model accuracy by calling GetSolutionMetrics. +When you are satisfied with the solution version, you deploy it using CreateCampaign. The +campaign provides recommendations to a client through the GetRecommendations API. Amazon +Personalize doesn't support configuring the hpoObjective for solution hyperparameter +optimization at this time. Status A solution can be in one of the following states: +CREATE PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED DELETE PENDING > +DELETE IN_PROGRESS To get the status of the solution, call DescribeSolution. If you use +manual training, the status must be ACTIVE before you call CreateSolutionVersion. Related +APIs UpdateSolution ListSolutions CreateSolutionVersion DescribeSolution + DeleteSolution ListSolutionVersions DescribeSolutionVersion # Arguments - `dataset_group_arn`: The Amazon Resource Name (ARN) of the dataset group that provides @@ -1004,7 +1002,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys creates new solution versions every 7 days. You can change the training frequency by specifying a schedulingExpression in the AutoTrainingConfig as part of solution configuration. For more information about automatic training, see Configuring automatic - training. Automatic solution version creation starts one hour after the solution is + training. Automatic solution version creation starts within one hour after the solution is ACTIVE. If you manually create a solution version within the hour, the solution skips the first automatic training. After training starts, you can get the solution version's Amazon Resource Name (ARN) with the ListSolutionVersions API operation. To get its status, @@ -1015,8 +1013,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"recipeArn"`: The Amazon Resource Name (ARN) of the recipe to use for model training. This is required when performAutoML is false. For information about different Amazon Personalize recipes and their ARNs, see Choosing a recipe. -- `"solutionConfig"`: The configuration to use with the solution. 
When performAutoML is set - to true, Amazon Personalize only evaluates the autoMLConfig section of the solution +- `"solutionConfig"`: The configuration properties for the solution. When performAutoML is + set to true, Amazon Personalize only evaluates the autoMLConfig section of the solution configuration. Amazon Personalize doesn't support configuring the hpoObjective at this time. - `"tags"`: A list of tags to apply to the solution. @@ -3079,3 +3077,54 @@ function update_recommender( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_solution(solution_arn) + update_solution(solution_arn, params::Dict{String,<:Any}) + +Updates an Amazon Personalize solution to use a different automatic training configuration. +When you update a solution, you can change whether the solution uses automatic training, +and you can change the training frequency. For more information about updating a solution, +see Updating a solution. A solution update can be in one of the following states: CREATE +PENDING > CREATE IN_PROGRESS > ACTIVE -or- CREATE FAILED To get the status of a +solution update, call the DescribeSolution API operation and find the status in the +latestSolutionUpdate. + +# Arguments +- `solution_arn`: The Amazon Resource Name (ARN) of the solution to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"performAutoTraining"`: Whether the solution uses automatic training to create new + solution versions (trained models). You can change the training frequency by specifying a + schedulingExpression in the AutoTrainingConfig as part of solution configuration. If you + turn on automatic training, the first automatic training starts within one hour after the + solution update completes. If you manually create a solution version within the hour, the + solution skips the first automatic training. For more information about automatic training, + see Configuring automatic training. After training starts, you can get the solution + version's Amazon Resource Name (ARN) with the ListSolutionVersions API operation. To get + its status, use the DescribeSolutionVersion. +- `"solutionUpdateConfig"`: The new configuration details of the solution. +""" +function update_solution(solutionArn; aws_config::AbstractAWSConfig=global_aws_config()) + return personalize( + "UpdateSolution", + Dict{String,Any}("solutionArn" => solutionArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_solution( + solutionArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return personalize( + "UpdateSolution", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("solutionArn" => solutionArn), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/pi.jl b/src/services/pi.jl index 8d1ac9c6e8..5941314693 100644 --- a/src/services/pi.jl +++ b/src/services/pi.jl @@ -173,7 +173,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys \"AdditionalMetrics\" : { \"string\" : \"string\" }. - `"Filter"`: One or more filters to apply in the request. Restrictions: Any number of filters by the same dimension, as specified in the GroupBy or Partition parameters. A - single filter for any other dimension in this dimension group. + single filter for any other dimension in this dimension group. The db.sql.db_id filter + isn't available for RDS for SQL Server DB instances. 
- `"MaxResults"`: The maximum number of items to return in the response. If more items exist than the specified MaxRecords value, a pagination token is included in the response so that the remaining results can be retrieved. diff --git a/src/services/pinpoint_sms_voice_v2.jl b/src/services/pinpoint_sms_voice_v2.jl index f9c6893414..0ce86979f2 100644 --- a/src/services/pinpoint_sms_voice_v2.jl +++ b/src/services/pinpoint_sms_voice_v2.jl @@ -184,12 +184,12 @@ end create_event_destination(configuration_set_name, event_destination_name, matching_event_types, params::Dict{String,<:Any}) Creates a new event destination in a configuration set. An event destination is a location -where you send message events. The event options are Amazon CloudWatch, Amazon Kinesis Data +where you send message events. The event options are Amazon CloudWatch, Amazon Data Firehose, or Amazon SNS. For example, when a message is delivered successfully, you can send information about that event to an event destination, or send notifications to endpoints that are subscribed to an Amazon SNS topic. Each configuration set can contain between 0 and 5 event destinations. Each event destination can contain a reference to a -single destination, such as a CloudWatch or Kinesis Data Firehose destination. +single destination, such as a CloudWatch or Firehose destination. # Arguments - `configuration_set_name`: Either the name of the configuration set or the configuration @@ -197,8 +197,8 @@ single destination, such as a CloudWatch or Kinesis Data Firehose destination. found using the DescribeConfigurationSets action. - `event_destination_name`: The name that identifies the event destination. - `matching_event_types`: An array of event types that determine which events to log. If - \"ALL\" is used, then Amazon Pinpoint logs every event type. The TEXT_SENT event type is - not supported. + \"ALL\" is used, then AWS End User Messaging SMS and Voice logs every event type. The + TEXT_SENT event type is not supported. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -208,7 +208,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"CloudWatchLogsDestination"`: An object that contains information about an event destination for logging to Amazon CloudWatch Logs. - `"KinesisFirehoseDestination"`: An object that contains information about an event - destination for logging to Amazon Kinesis Data Firehose. + destination for logging to Amazon Data Firehose. - `"SnsDestination"`: An object that contains information about an event destination for logging to Amazon SNS. """ @@ -265,7 +265,7 @@ An opt-out list is a list of phone numbers that are opted out, meaning you can't or voice messages to them. If end user replies with the keyword \"STOP,\" an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported opt-out -keywords, see SMS opt out in the Amazon Pinpoint User Guide. +keywords, see SMS opt out in the AWS End User Messaging SMS User Guide. # Arguments - `opt_out_list_name`: The name of the new OptOutList. @@ -328,11 +328,12 @@ be associated with multiple pools. country or region of the new pool. - `message_type`: The type of message. Valid values are TRANSACTIONAL for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or - time-sensitive. + time-sensitive. 
After the pool is created the MessageType can't be changed. - `origination_identity`: The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the - values for SenderId and SenderIdArn. + values for SenderId and SenderIdArn. After the pool is created you can add more origination + identities to the pool by using AssociateOriginationIdentity. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -871,8 +872,8 @@ Deletes an existing keyword from an origination phone number or pool. A keyword that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins -with a keyword, Amazon Pinpoint responds with a customizable message. Keywords \"HELP\" and -\"STOP\" can't be deleted or modified. +with a keyword, AWS End User Messaging SMS and Voice responds with a customizable message. +Keywords \"HELP\" and \"STOP\" can't be deleted or modified. # Arguments - `keyword`: The keyword to delete. @@ -1230,8 +1231,8 @@ end Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is -controlled by Amazon Web Services. For more information on spend limits (quotas) see Amazon -Pinpoint quotas in the Amazon Pinpoint Developer Guide. +controlled by Amazon Web Services. For more information on spend limits (quotas) see Quotas + in the AWS End User Messaging SMS User Guide. """ function delete_text_message_spend_limit_override(; @@ -1302,8 +1303,8 @@ end Deletes an account level monthly spend limit override for sending voice messages. Deleting a spend limit override sets the EnforcedLimit equal to the MaxLimit, which is controlled by -Amazon Web Services. For more information on spending limits (quotas) see Amazon Pinpoint -quotas in the Amazon Pinpoint Developer Guide. +Amazon Web Services. For more information on spending limits (quotas) see Quotas in the +AWS End User Messaging SMS User Guide. """ function delete_voice_message_spend_limit_override(; @@ -1333,9 +1334,9 @@ end Describes attributes of your Amazon Web Services account. The supported account attributes include account tier, which indicates whether your account is in the sandbox or production environment. When you're ready to move your account out of the sandbox, create an Amazon -Web Services Support case for a service limit increase request. New Amazon Pinpoint -accounts are placed into an SMS or voice sandbox. The sandbox protects both Amazon Web -Services end recipients and SMS or voice recipients from fraud and abuse. +Web Services Support case for a service limit increase request. New accounts are placed +into an SMS or voice sandbox. The sandbox protects both Amazon Web Services end recipients +and SMS or voice recipients from fraud and abuse. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1363,12 +1364,12 @@ end describe_account_limits() describe_account_limits(params::Dict{String,<:Any}) -Describes the current Amazon Pinpoint SMS Voice V2 resource quotas for your account. 
The -description for a quota includes the quota name, current usage toward that quota, and the -quota's maximum value. When you establish an Amazon Web Services account, the account has -initial quotas on the maximum number of configuration sets, opt-out lists, phone numbers, -and pools that you can create in a given Region. For more information see Amazon Pinpoint -quotas in the Amazon Pinpoint Developer Guide. +Describes the current AWS End User Messaging SMS and Voice SMS Voice V2 resource quotas for +your account. The description for a quota includes the quota name, current usage toward +that quota, and the quota's maximum value. When you establish an Amazon Web Services +account, the account has initial quotas on the maximum number of configuration sets, +opt-out lists, phone numbers, and pools that you can create in a given Region. For more +information see Quotas in the AWS End User Messaging SMS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -1436,8 +1437,8 @@ Describes the specified keywords or all keywords on your origination phone numbe A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a -message that begins with a keyword, Amazon Pinpoint responds with a customizable message. -If you specify a keyword that isn't valid, an error is returned. +message that begins with a keyword, AWS End User Messaging SMS and Voice responds with a +customizable message. If you specify a keyword that isn't valid, an error is returned. # Arguments - `origination_identity`: The origination identity to use such as a PhoneNumberId, @@ -1987,11 +1988,11 @@ end describe_spend_limits() describe_spend_limits(params::Dict{String,<:Any}) -Describes the current Amazon Pinpoint monthly spend limits for sending voice and text -messages. When you establish an Amazon Web Services account, the account has initial -monthly spend limit in a given Region. For more information on increasing your monthly -spend limit, see Requesting increases to your monthly SMS spending quota for Amazon -Pinpoint in the Amazon Pinpoint User Guide. +Describes the current monthly spend limits for sending voice and text messages. When you +establish an Amazon Web Services account, the account has initial monthly spend limit in a +given Region. For more information on increasing your monthly spend limit, see Requesting +increases to your monthly SMS, MMS, or Voice spending quota in the AWS End User Messaging +SMS User Guide. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -2377,8 +2378,8 @@ Creates or updates a keyword configuration on an origination phone number or poo keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message -that begins with a keyword, Amazon Pinpoint responds with a customizable message. If you -specify a keyword that isn't valid, an error is returned. +that begins with a keyword, AWS End User Messaging SMS and Voice responds with a +customizable message. If you specify a keyword that isn't valid, an error is returned. # Arguments - `keyword`: The new keyword to add. 
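
# Example

An illustrative sketch only: the values are placeholders, and the positional order assumes
the operation's remaining required parameters (the keyword message and the origination
identity) follow the generated convention used elsewhere in this file:

    put_keyword(
        "COUPON",                  # keyword to create or update
        "Reply STOP to opt out.",  # message returned when the keyword is received
        "my-pool-id",              # origination identity (phone number or pool)
    )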
@@ -2618,7 +2619,7 @@ end request_phone_number(iso_country_code, message_type, number_capabilities, number_type, params::Dict{String,<:Any}) Request an origination phone number for use in your account. For more information on phone -number request see Requesting a number in the Amazon Pinpoint User Guide. +number request see Request a phone number in the AWS End User Messaging SMS User Guide. # Arguments - `iso_country_code`: The two-character code, in ISO 3166-1 alpha-2 format, for the country @@ -2892,11 +2893,12 @@ end send_text_message(destination_phone_number) send_text_message(destination_phone_number, params::Dict{String,<:Any}) -Creates a new text message and sends it to a recipient's phone number. SMS throughput -limits are measured in Message Parts per Second (MPS). Your MPS limit depends on the -destination country of your messages, as well as the type of phone number (origination -number) that you use to send the message. For more information, see Message Parts per -Second (MPS) limits in the Amazon Pinpoint User Guide. +Creates a new text message and sends it to a recipient's phone number. SendTextMessage only +sends an SMS message to one recipient each time it is invoked. SMS throughput limits are +measured in Message Parts per Second (MPS). Your MPS limit depends on the destination +country of your messages, as well as the type of phone number (origination number) that you +use to send the message. For more information about MPS, see Message Parts per Second (MPS) +limits in the AWS End User Messaging SMS User Guide. # Arguments - `destination_phone_number`: The destination phone number in E.164 format. @@ -2910,20 +2912,31 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DestinationCountryParameters"`: This field is used for any country-specific registration requirements. Currently, this setting is only used when you send messages to recipients in India using a sender ID. For more information see Special requirements for - sending SMS messages to recipients in India. + sending SMS messages to recipients in India. IN_ENTITY_ID The entity ID or Principal + Entity (PE) ID that you received after completing the sender ID registration process. + IN_TEMPLATE_ID The template ID that you received after completing the sender ID + registration process. Make sure that the Template ID that you specify matches your message + template exactly. If your message doesn't match the template that you provided during the + registration process, the mobile carriers might reject your message. - `"DryRun"`: When set to true, the message is checked and validated, but isn't sent to the - end recipient. + end recipient. You are not charged for using DryRun. The Message Parts per Second (MPS) + limit when using DryRun is five. If your origination identity has a lower MPS limit then + the lower MPS limit is used. For more information about MPS limits, see Message Parts per + Second (MPS) limits in the AWS End User Messaging SMS User Guide.. - `"Keyword"`: When you register a short code in the US, you must specify a program name. If you don’t have a US short code, omit this attribute. - `"MaxPrice"`: The maximum amount that you want to spend, in US dollars, per each text - message part. A text message can contain multiple parts. + message. If the calculated amount to send the text message is greater than MaxPrice, the + message is not sent and an error is returned. - `"MessageBody"`: The body of the text message. - `"MessageType"`: The type of message. 
Valid values are for messages that are critical or time-sensitive and PROMOTIONAL for messages that aren't critical or time-sensitive. - `"OriginationIdentity"`: The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn. - `"ProtectConfigurationId"`: The unique identifier for the protect configuration. -- `"TimeToLive"`: How long the text message is valid for. By default this is 72 hours. +- `"TimeToLive"`: How long the text message is valid for, in seconds. By default this is 72 + hours. If the messages isn't handed off before the TTL expires we stop attempting to hand + off the message and return TTL_EXPIRED event. """ function send_text_message( DestinationPhoneNumber; aws_config::AbstractAWSConfig=global_aws_config() @@ -2958,8 +2971,8 @@ end send_voice_message(destination_phone_number, origination_identity) send_voice_message(destination_phone_number, origination_identity, params::Dict{String,<:Any}) -Allows you to send a request that sends a voice message through Amazon Pinpoint. This -operation uses Amazon Polly to convert a text script into a voice message. +Allows you to send a request that sends a voice message. This operation uses Amazon Polly +to convert a text script into a voice message. # Arguments - `destination_phone_number`: The destination phone number in E.164 format. @@ -3320,11 +3333,11 @@ end tag_resource(resource_arn, tags) tag_resource(resource_arn, tags, params::Dict{String,<:Any}) -Adds or overwrites only the specified tags for the specified Amazon Pinpoint SMS Voice, -version 2 resource. When you specify an existing tag key, the value is overwritten with the -new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and an -optional value. Tag keys must be unique per resource. For more information about tags, see -Tagging Amazon Pinpoint resources in the Amazon Pinpoint Developer Guide. +Adds or overwrites only the specified tags for the specified resource. When you specify an +existing tag key, the value is overwritten with the new value. Each resource can have a +maximum of 50 tags. Each tag consists of a key and an optional value. Tag keys must be +unique per resource. For more information about tags, see Tags in the AWS End User +Messaging SMS User Guide. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource. @@ -3363,9 +3376,8 @@ end untag_resource(resource_arn, tag_keys) untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) -Removes the association of the specified tags from an Amazon Pinpoint SMS Voice V2 -resource. For more information on tags see Tagging Amazon Pinpoint resources in the Amazon -Pinpoint Developer Guide. +Removes the association of the specified tags from a resource. For more information on tags +see Tags in the AWS End User Messaging SMS User Guide. # Arguments - `resource_arn`: The Amazon Resource Name (ARN) of the resource. @@ -3407,10 +3419,10 @@ end update_event_destination(configuration_set_name, event_destination_name, params::Dict{String,<:Any}) Updates an existing event destination in a configuration set. You can update the IAM role -ARN for CloudWatch Logs and Kinesis Data Firehose. You can also enable or disable the event -destination. You may want to update an event destination to change its matching event types -or updating the destination resource ARN. You can't change an event destination's type -between CloudWatch Logs, Kinesis Data Firehose, and Amazon SNS. 
+ARN for CloudWatch Logs and Firehose. You can also enable or disable the event destination. +You may want to update an event destination to change its matching event types or updating +the destination resource ARN. You can't change an event destination's type between +CloudWatch Logs, Firehose, and Amazon SNS. # Arguments - `configuration_set_name`: The configuration set to update with the new event destination. @@ -3423,7 +3435,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys destination that sends data to CloudWatch Logs. - `"Enabled"`: When set to true logging is enabled. - `"KinesisFirehoseDestination"`: An object that contains information about an event - destination for logging to Kinesis Data Firehose. + destination for logging to Firehose. - `"MatchingEventTypes"`: An array of event types that determine which events to log. The TEXT_SENT event type is not supported. - `"SnsDestination"`: An object that contains information about an event destination that @@ -3487,10 +3499,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"OptOutListName"`: The OptOutList to add the phone number to. Valid values for this field can be either the OutOutListName or OutOutListArn. - `"SelfManagedOptOutsEnabled"`: By default this is set to false. When an end recipient - sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon - Pinpoint automatically replies with a customizable message and adds the end recipient to - the OptOutList. When set to true you're responsible for responding to HELP and STOP - requests. You're also responsible for tracking and honoring opt-out requests. + sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End + User Messaging SMS and Voice automatically replies with a customizable message and adds the + end recipient to the OptOutList. When set to true you're responsible for responding to HELP + and STOP requests. You're also responsible for tracking and honoring opt-out requests. - `"TwoWayChannelArn"`: The Amazon Resource Name (ARN) of the two way channel. - `"TwoWayChannelRole"`: An optional IAM Role Arn for a service to assume, to be able to post inbound SMS messages. @@ -3540,10 +3552,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"OptOutListName"`: The OptOutList to associate with the pool. Valid values are either OptOutListName or OptOutListArn. - `"SelfManagedOptOutsEnabled"`: By default this is set to false. When an end recipient - sends a message that begins with HELP or STOP to one of your dedicated numbers, Amazon - Pinpoint automatically replies with a customizable message and adds the end recipient to - the OptOutList. When set to true you're responsible for responding to HELP and STOP - requests. You're also responsible for tracking and honoring opt-out requests. + sends a message that begins with HELP or STOP to one of your dedicated numbers, AWS End + User Messaging SMS and Voice automatically replies with a customizable message and adds the + end recipient to the OptOutList. When set to true you're responsible for responding to HELP + and STOP requests. You're also responsible for tracking and honoring opt-out requests. - `"SharedRoutesEnabled"`: Indicates whether shared routes are enabled for the pool. - `"TwoWayChannelArn"`: The Amazon Resource Name (ARN) of the two way channel. 
- `"TwoWayChannelRole"`: An optional IAM Role Arn for a service to assume, to be able to @@ -3625,7 +3637,7 @@ only applied to the specified NumberCapability type. - `country_rule_set_updates`: A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported - countries and regions (SMS channel) in the Amazon Pinpoint SMS user guide. + countries and regions (SMS channel) in the AWS End User Messaging SMS User Guide. - `number_capability`: The number capability to apply the CountryRuleSetUpdates updates to. - `protect_configuration_id`: The unique identifier for the protect configuration. diff --git a/src/services/qapps.jl b/src/services/qapps.jl new file mode 100644 index 0000000000..fe4605df03 --- /dev/null +++ b/src/services/qapps.jl @@ -0,0 +1,1242 @@ +# This file is auto-generated by AWSMetadata.jl +using AWS +using AWS.AWSServices: qapps +using AWS.Compat +using AWS.UUIDs + +""" + associate_library_item_review(instance-id, library_item_id) + associate_library_item_review(instance-id, library_item_id, params::Dict{String,<:Any}) + +Associates a rating or review for a library item with the user submitting the request. This +increments the rating count for the specified library item. + +# Arguments +- `instance-id`: The unique identifier for the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to associate the review with. + +""" +function associate_library_item_review( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.associateItemRating", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_library_item_review( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.associateItemRating", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + associate_qapp_with_user(app_id, instance-id) + associate_qapp_with_user(app_id, instance-id, params::Dict{String,<:Any}) + +This operation creates a link between the user's identity calling the operation and a +specific Q App. This is useful to mark the Q App as a favorite for the user if the user +doesn't own the Amazon Q App so they can still run it and see it in their inventory of Q +Apps. + +# Arguments +- `app_id`: The ID of the Amazon Q App to associate with the user. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. 
+ +""" +function associate_qapp_with_user( + appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.install", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function associate_qapp_with_user( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.install", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_library_item(app_id, app_version, categories, instance-id) + create_library_item(app_id, app_version, categories, instance-id, params::Dict{String,<:Any}) + +Creates a new library item for an Amazon Q App, allowing it to be discovered and used by +other allowed users. + +# Arguments +- `app_id`: The unique identifier of the Amazon Q App to publish to the library. +- `app_version`: The version of the Amazon Q App to publish to the library. +- `categories`: The categories to associate with the library item for easier discovery. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +""" +function create_library_item( + appId, + appVersion, + categories, + instance_id; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.createItem", + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "categories" => categories, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_library_item( + appId, + appVersion, + categories, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.createItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "categories" => categories, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + create_qapp(app_definition, instance-id, title) + create_qapp(app_definition, instance-id, title, params::Dict{String,<:Any}) + +Creates a new Amazon Q App based on the provided definition. The Q App definition specifies +the cards and flow of the Q App. This operation also calculates the dependencies between +the cards by inspecting the references in the prompts. + +# Arguments +- `app_definition`: The definition of the new Q App, specifying the cards and flow. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `title`: The title of the new Q App. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"description"`: The description of the new Q App. +- `"tags"`: Optional tags to associate with the new Q App. 
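+
+# Example
+
+An illustrative sketch; `app_definition` stands in for a Dict describing the Q App's cards
+and flow, and the instance ID is a placeholder:
+
+    app_definition = Dict{String,Any}()  # fill in with the cards and flow of the Q App
+    create_qapp(app_definition, "my-instance-id", "My sample Q App")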
+""" +function create_qapp( + appDefinition, instance_id, title; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.create", + Dict{String,Any}( + "appDefinition" => appDefinition, + "title" => title, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_qapp( + appDefinition, + instance_id, + title, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.create", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appDefinition" => appDefinition, + "title" => title, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_library_item(instance-id, library_item_id) + delete_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Deletes a library item for an Amazon Q App, removing it from the library so it can no +longer be discovered or used by other users. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to delete. + +""" +function delete_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.deleteItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.deleteItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + delete_qapp(app_id, instance-id) + delete_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Deletes an Amazon Q App owned by the user. If the Q App was previously published to the +library, it is also removed from the library. + +# Arguments +- `app_id`: The unique identifier of the Q App to delete. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +""" +function delete_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.delete", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.delete", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_library_item_review(instance-id, library_item_id) + disassociate_library_item_review(instance-id, library_item_id, params::Dict{String,<:Any}) + +Removes a rating or review previously submitted by the user for a library item. 
+ +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to remove the review from. + +""" +function disassociate_library_item_review( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.disassociateItemRating", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_library_item_review( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.disassociateItemRating", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + disassociate_qapp_from_user(app_id, instance-id) + disassociate_qapp_from_user(app_id, instance-id, params::Dict{String,<:Any}) + +Disassociates a Q App from a user removing the user's access to run the Q App. + +# Arguments +- `app_id`: The unique identifier of the Q App to disassociate from the user. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +""" +function disassociate_qapp_from_user( + appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/apps.uninstall", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function disassociate_qapp_from_user( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.uninstall", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_library_item(instance-id, library_item_id) + get_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Retrieves details about a library item for an Amazon Q App, including its metadata, +categories, ratings, and usage statistics. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to retrieve. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appId"`: The unique identifier of the Amazon Q App associated with the library item. 
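+
+# Example
+
+A minimal call with placeholder identifiers:
+
+    item = get_library_item("my-instance-id", "my-library-item-id")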
+""" +function get_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/catalog.getItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/catalog.getItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_qapp(app_id, instance-id) + get_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Retrieves the full details of an Q App, including its definition specifying the cards and +flow. + +# Arguments +- `app_id`: The unique identifier of the Q App to retrieve. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +""" +function get_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/apps.get", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/apps.get", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_qapp_session(instance-id, session_id) + get_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Retrieves the current state and results for an active session of an Amazon Q App. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to retrieve. + +""" +function get_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/runtime.getQAppSession", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/runtime.getQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + import_document(app_id, card_id, file_contents_base64, file_name, instance-id, scope) + import_document(app_id, card_id, file_contents_base64, file_name, instance-id, scope, params::Dict{String,<:Any}) + +Uploads a file that can then be used either as a default in a FileUploadCard from Q App +definition or as a file that is used inside a single Q App run. 
The purpose of the document +is determined by a scope parameter that indicates whether it is at the app definition level +or at the app session level. + +# Arguments +- `app_id`: The unique identifier of the Q App the file is associated with. +- `card_id`: The unique identifier of the card the file is associated with, if applicable. +- `file_contents_base64`: The base64-encoded contents of the file to upload. +- `file_name`: The name of the file being uploaded. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `scope`: Whether the file is associated with an Q App definition or a specific Q App + session. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"sessionId"`: The unique identifier of the Q App session the file is associated with, if + applicable. +""" +function import_document( + appId, + cardId, + fileContentsBase64, + fileName, + instance_id, + scope; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.importDocument", + Dict{String,Any}( + "appId" => appId, + "cardId" => cardId, + "fileContentsBase64" => fileContentsBase64, + "fileName" => fileName, + "scope" => scope, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function import_document( + appId, + cardId, + fileContentsBase64, + fileName, + instance_id, + scope, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.importDocument", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "cardId" => cardId, + "fileContentsBase64" => fileContentsBase64, + "fileName" => fileName, + "scope" => scope, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_library_items(instance-id) + list_library_items(instance-id, params::Dict{String,<:Any}) + +Lists the library items for Amazon Q Apps that are published and available for users in +your Amazon Web Services account. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"categoryId"`: Optional category to filter the library items by. +- `"limit"`: The maximum number of library items to return in the response. +- `"nextToken"`: The token to request the next page of results. 
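+
+# Example
+
+Fetching one page of results; the instance ID is a placeholder and `limit` is the optional
+key documented above:
+
+    page = list_library_items("my-instance-id", Dict("limit" => 25))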
+""" +function list_library_items(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/catalog.list", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_library_items( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/catalog.list", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_qapps(instance-id) + list_qapps(instance-id, params::Dict{String,<:Any}) + +Lists the Amazon Q Apps owned by or associated with the user either because they created it +or because they used it from the library in the past. The user identity is extracted from +the credentials used to invoke this operation.. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"limit"`: The maximum number of Q Apps to return in the response. +- `"nextToken"`: The token to request the next page of results. +""" +function list_qapps(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "GET", + "/apps.list", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_qapps( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/apps.list", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Lists the tags associated with an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource whose tags should be + listed. + +""" +function list_tags_for_resource( + resourceARN; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "GET", + "/tags/$(resourceARN)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + resourceARN, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "GET", + "/tags/$(resourceARN)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + predict_qapp(instance-id) + predict_qapp(instance-id, params::Dict{String,<:Any}) + +Generates an Amazon Q App definition based on either a conversation or a problem statement +provided as input.The resulting app definition can be used to call CreateQApp. This API +doesn't create Amazon Q Apps directly. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"options"`: The input to generate the Q App definition from, either a conversation or + problem statement. 
+""" +function predict_qapp(instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.predictQApp", + Dict{String,Any}("headers" => Dict{String,Any}("instance-id" => instance_id)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function predict_qapp( + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.predictQApp", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("instance-id" => instance_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + start_qapp_session(app_id, app_version, instance-id) + start_qapp_session(app_id, app_version, instance-id, params::Dict{String,<:Any}) + +Starts a new session for an Amazon Q App, allowing inputs to be provided and the app to be +run. Each Q App session will be condensed into a single conversation in the web +experience. + +# Arguments +- `app_id`: The unique identifier of the Q App to start a session for. +- `app_version`: The version of the Q App to use for the session. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"initialValues"`: Optional initial input values to provide for the Q App session. +- `"tags"`: Optional tags to associate with the new Q App session. +""" +function start_qapp_session( + appId, appVersion, instance_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.startQAppSession", + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_qapp_session( + appId, + appVersion, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.startQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "appVersion" => appVersion, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + stop_qapp_session(instance-id, session_id) + stop_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Stops an active session for an Amazon Q App.This deletes all data related to the session +and makes it invalid for future uses. The results of the session will be persisted as part +of the conversation. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to stop. 
+ +""" +function stop_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.deleteMiniAppRun", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.deleteMiniAppRun", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Associates tags with an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to tag. +- `tags`: The tags to associate with the resource. + +""" +function tag_resource(resourceARN, tags; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/tags/$(resourceARN)", + Dict{String,Any}("tags" => tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + resourceARN, + tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/tags/$(resourceARN)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tags" => tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Disassociates tags from an Amazon Q Apps resource. + +# Arguments +- `resource_arn`: The Amazon Resource Name (ARN) of the resource to disassociate the tag + from. +- `tag_keys`: The keys of the tags to disassociate from the resource. + +""" +function untag_resource( + resourceARN, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "DELETE", + "/tags/$(resourceARN)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + resourceARN, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "DELETE", + "/tags/$(resourceARN)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_library_item(instance-id, library_item_id) + update_library_item(instance-id, library_item_id, params::Dict{String,<:Any}) + +Updates the library item for an Amazon Q App. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the library item to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"categories"`: The new categories to associate with the library item. +- `"status"`: The new status to set for the library item, such as \"Published\" or + \"Hidden\". 
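+
+# Example
+
+Publishing a library item, using placeholder identifiers and a status value from the
+options documented above:
+
+    update_library_item(
+        "my-instance-id", "my-library-item-id", Dict("status" => "Published")
+    )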
+""" +function update_library_item( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.updateItem", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_library_item( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.updateItem", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_library_item_metadata(instance-id, library_item_id) + update_library_item_metadata(instance-id, library_item_id, params::Dict{String,<:Any}) + +Updates the verification status of a library item for an Amazon Q App. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `library_item_id`: The unique identifier of the updated library item. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"isVerified"`: The verification status of the library item +""" +function update_library_item_metadata( + instance_id, libraryItemId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/catalog.updateItemMetadata", + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_library_item_metadata( + instance_id, + libraryItemId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/catalog.updateItemMetadata", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "libraryItemId" => libraryItemId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_qapp(app_id, instance-id) + update_qapp(app_id, instance-id, params::Dict{String,<:Any}) + +Updates an existing Amazon Q App, allowing modifications to its title, description, and +definition. + +# Arguments +- `app_id`: The unique identifier of the Q App to update. +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appDefinition"`: The new definition specifying the cards and flow for the Q App. +- `"description"`: The new description for the Q App. +- `"title"`: The new title for the Q App. 
+""" +function update_qapp(appId, instance_id; aws_config::AbstractAWSConfig=global_aws_config()) + return qapps( + "POST", + "/apps.update", + Dict{String,Any}( + "appId" => appId, "headers" => Dict{String,Any}("instance-id" => instance_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_qapp( + appId, + instance_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/apps.update", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "appId" => appId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_qapp_session(instance-id, session_id) + update_qapp_session(instance-id, session_id, params::Dict{String,<:Any}) + +Updates the session for a given Q App sessionId. This is only valid when at least one card +of the session is in the WAITING state. Data for each WAITING card can be provided as +input. If inputs are not provided, the call will be accepted but session will not move +forward. Inputs for cards that are not in the WAITING status will be ignored. + +# Arguments +- `instance-id`: The unique identifier of the Amazon Q Business application environment + instance. +- `session_id`: The unique identifier of the Q App session to provide input for. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"values"`: The input values to provide for the current state of the Q App session. +""" +function update_qapp_session( + instance_id, sessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qapps( + "POST", + "/runtime.updateQAppSession", + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_qapp_session( + instance_id, + sessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qapps( + "POST", + "/runtime.updateQAppSession", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "sessionId" => sessionId, + "headers" => Dict{String,Any}("instance-id" => instance_id), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/qbusiness.jl b/src/services/qbusiness.jl index 9fb58ab1af..49238115e1 100644 --- a/src/services/qbusiness.jl +++ b/src/services/qbusiness.jl @@ -183,14 +183,20 @@ assign subscription tiers to users. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"attachmentsConfiguration"`: An option to allow end users to upload files directly during chat. +- `"clientIdsForOIDC"`: The OIDC client ID for a Amazon Q Business application. - `"clientToken"`: A token that you provide to identify the request to create your Amazon Q Business application. - `"description"`: A description for the Amazon Q Business application. - `"encryptionConfiguration"`: The identifier of the KMS key that is used to encrypt your data. Amazon Q Business doesn't support asymmetric keys. +- `"iamIdentityProviderArn"`: The Amazon Resource Name (ARN) of an identity provider being + used by an Amazon Q Business application. 
- `"identityCenterInstanceArn"`: The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for—or connecting to—your Amazon Q Business application. +- `"identityType"`: The authentication type being used by a Amazon Q Business application. +- `"personalizationConfiguration"`: Configuration information about chat response + personalization. For more information, see Personalizing chat responses - `"qAppsConfiguration"`: An option to allow end users to create and use Amazon Q Apps in the web experience. - `"roleArn"`: The Amazon Resource Name (ARN) of an IAM role with permissions to access @@ -242,8 +248,16 @@ created. Otherwise, an exception is raised. # Arguments - `application_id`: The identifier of the Amazon Q Business application the data source will be attached to. -- `configuration`: Configuration information to connect to your data source repository. For - configuration templates for your specific data source, see Supported connectors. +- `configuration`: Configuration information to connect your data source repository to + Amazon Q Business. Use this parameter to provide a JSON schema with configuration + information specific to your data source connector. Each data source has a JSON schema + provided by Amazon Q Business that you must use. For example, the Amazon S3 and Web Crawler + connectors require the following JSON schemas: Amazon S3 JSON schema Web Crawler + JSON schema You can find configuration templates for your specific data source using the + following steps: Navigate to the Supported connectors page in the Amazon Q Business User + Guide, and select the data source of your choice. Then, from your specific data source + connector page, select Using the API. You will find the JSON schema for your data source, + including parameter descriptions, in this section. - `display_name`: A name for the data source connector. - `index_id`: The identifier of the index that you want to use with the data source connector. @@ -581,8 +595,12 @@ Creates an Amazon Q Business web experience. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"clientToken"`: A token you provide to identify a request to create an Amazon Q Business web experience. +- `"identityProviderConfiguration"`: Information about the identity provider (IdP) used to + authenticate end users of an Amazon Q Business web experience. - `"roleArn"`: The Amazon Resource Name (ARN) of the service role attached to your web - experience. + experience. You must provide this value if you're using IAM Identity Center to manage end + user access to your application. If you're using legacy identity management to manage user + access, you don't need to provide this value. - `"samplePromptsControlMode"`: Determines whether sample prompts are enabled in the web experience for an end user. - `"subtitle"`: A subtitle to personalize your Amazon Q Business web experience. @@ -1904,11 +1922,7 @@ top-secret company documents in their Amazon Q Business chat results. - `group_members`: - `group_name`: The list that contains your users or sub groups that belong the same group. For example, the group \"Company\" includes the user \"CEO\" and the sub groups - \"Research\", \"Engineering\", and \"Sales and Marketing\". If you have more than 1000 - users and/or sub groups for a single group, you need to provide the path to the S3 file - that lists your users and sub groups for a group. 
Your sub groups can contain more than - 1000 users, but the list of sub groups that belong to a group (and/or users) must be no - more than 1000. + \"Research\", \"Engineering\", and \"Sales and Marketing\". - `index_id`: The identifier of the index in which you want to map users to their groups. - `type`: The type of the group. @@ -2135,11 +2149,16 @@ Updates an existing Amazon Q Business application. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"attachmentsConfiguration"`: An option to allow end users to upload files directly during chat. +- `"autoSubscriptionConfiguration"`: An option to enable updating the default subscription + type assigned to an Amazon Q Business application using IAM identity federation for user + management. - `"description"`: A description for the Amazon Q Business application. - `"displayName"`: A name for the Amazon Q Business application. - `"identityCenterInstanceArn"`: The Amazon Resource Name (ARN) of the IAM Identity Center instance you are either creating for—or connecting to—your Amazon Q Business application. +- `"personalizationConfiguration"`: Configuration information about chat response + personalization. For more information, see Personalizing chat responses. - `"qAppsConfiguration"`: An option to allow end users to create and use Amazon Q Apps in the web experience. - `"roleArn"`: An Amazon Web Services Identity and Access Management (IAM) role that gives @@ -2458,6 +2477,8 @@ Updates an Amazon Q Business web experience. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"authenticationConfiguration"`: The authentication configuration of the Amazon Q Business web experience. +- `"identityProviderConfiguration"`: Information about the identity provider (IdP) used to + authenticate end users of an Amazon Q Business web experience. - `"roleArn"`: The Amazon Resource Name (ARN) of the role with permission to access the Amazon Q Business web experience and required resources. - `"samplePromptsControlMode"`: Determines whether sample prompts are enabled in the web diff --git a/src/services/qconnect.jl b/src/services/qconnect.jl index 29c813e3b2..1680362d5f 100644 --- a/src/services/qconnect.jl +++ b/src/services/qconnect.jl @@ -195,6 +195,80 @@ function create_content( ) end +""" + create_content_association(association, association_type, content_id, knowledge_base_id) + create_content_association(association, association_type, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Creates an association between a content resource in a knowledge base and step-by-step +guides. Step-by-step guides offer instructions to agents for resolving common customer +issues. You create a content association to integrate Amazon Q in Connect and step-by-step +guides. After you integrate Amazon Q and step-by-step guides, when Amazon Q provides a +recommendation to an agent based on the intent that it's detected, it also provides them +with the option to start the step-by-step guide that you have associated with the content. +Note the following limitations: You can create only one content association for each +content resource in a knowledge base. You can associate a step-by-step guide with +multiple content resources. For more information, see Integrate Amazon Q in Connect with +step-by-step guides in the Amazon Connect Administrator Guide. + +# Arguments +- `association`: The identifier of the associated resource. +- `association_type`: The type of association. 
+- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"clientToken"`: A unique, case-sensitive identifier that you provide to ensure the + idempotency of the request. If not provided, the Amazon Web Services SDK populates this + field. For more information about idempotency, see Making retries safe with idempotent APIs. +- `"tags"`: The tags used to organize, track, or control access for this resource. +""" +function create_content_association( + association, + associationType, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + Dict{String,Any}( + "association" => association, + "associationType" => associationType, + "clientToken" => string(uuid4()), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_content_association( + association, + associationType, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "POST", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "association" => association, + "associationType" => associationType, + "clientToken" => string(uuid4()), + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_knowledge_base(knowledge_base_type, name) create_knowledge_base(knowledge_base_type, name, params::Dict{String,<:Any}) @@ -502,6 +576,50 @@ function delete_content( ) end +""" + delete_content_association(content_association_id, content_id, knowledge_base_id) + delete_content_association(content_association_id, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Deletes the content association. For more information about content associations--what +they are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides +in the Amazon Connect Administrator Guide. + +# Arguments +- `content_association_id`: The identifier of the content association. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. 
+ +""" +function delete_content_association( + contentAssociationId, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_content_association( + contentAssociationId, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "DELETE", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_import_job(import_job_id, knowledge_base_id) delete_import_job(import_job_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -725,6 +843,50 @@ function get_content( ) end +""" + get_content_association(content_association_id, content_id, knowledge_base_id) + get_content_association(content_association_id, content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Returns the content association. For more information about content associations--what they +are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides in +the Amazon Connect Administrator Guide. + +# Arguments +- `content_association_id`: The identifier of the content association. Can be either the ID + or the ARN. URLs cannot contain the ARN. +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +""" +function get_content_association( + contentAssociationId, + contentId, + knowledgeBaseId; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_content_association( + contentAssociationId, + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations/$(contentAssociationId)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ get_content_summary(content_id, knowledge_base_id) get_content_summary(content_id, knowledge_base_id, params::Dict{String,<:Any}) @@ -1026,6 +1188,49 @@ function list_assistants( ) end +""" + list_content_associations(content_id, knowledge_base_id) + list_content_associations(content_id, knowledge_base_id, params::Dict{String,<:Any}) + +Lists the content associations. For more information about content associations--what they +are and when they are used--see Integrate Amazon Q in Connect with step-by-step guides in +the Amazon Connect Administrator Guide. + +# Arguments +- `content_id`: The identifier of the content. +- `knowledge_base_id`: The identifier of the knowledge base. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of results to return per page. +- `"nextToken"`: The token for the next set of results. Use the value returned in the + previous response in the next request to retrieve the next set of results. 
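+
+# Example
+A minimal usage sketch; the content and knowledge base IDs below are hypothetical
+placeholders, the page-collection logic is illustrative only, and a configured global AWS
+config is assumed:
+
+    using AWS: @service
+    @service QConnect
+
+    # Collect every page of associations for one piece of content.
+    pages = []
+    params = Dict{String,Any}("maxResults" => 50)
+    while true
+        resp = QConnect.list_content_associations(
+            "my-content-id", "my-knowledge-base-id", params
+        )
+        push!(pages, resp)
+        token = get(resp, "nextToken", nothing)
+        token === nothing && break
+        params["nextToken"] = token
+    end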
+""" +function list_content_associations( + contentId, knowledgeBaseId; aws_config::AbstractAWSConfig=global_aws_config() +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_content_associations( + contentId, + knowledgeBaseId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return qconnect( + "GET", + "/knowledgeBases/$(knowledgeBaseId)/contents/$(contentId)/associations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_contents(knowledge_base_id) list_contents(knowledge_base_id, params::Dict{String,<:Any}) diff --git a/src/services/quicksight.jl b/src/services/quicksight.jl index 6b911be5f0..bfc5e371a1 100644 --- a/src/services/quicksight.jl +++ b/src/services/quicksight.jl @@ -4,6 +4,88 @@ using AWS.AWSServices: quicksight using AWS.Compat using AWS.UUIDs +""" + batch_create_topic_reviewed_answer(answers, aws_account_id, topic_id) + batch_create_topic_reviewed_answer(answers, aws_account_id, topic_id, params::Dict{String,<:Any}) + +Creates new reviewed answers for a Q Topic. + +# Arguments +- `answers`: The definition of the Answers to be created. +- `aws_account_id`: The ID of the Amazon Web Services account that you want to create a + reviewed answer in. +- `topic_id`: The ID for the topic reviewed answer that you want to create. This ID is + unique per Amazon Web Services Region for each Amazon Web Services account. + +""" +function batch_create_topic_reviewed_answer( + Answers, AwsAccountId, TopicId; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-create-reviewed-answers", + Dict{String,Any}("Answers" => Answers); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_create_topic_reviewed_answer( + Answers, + AwsAccountId, + TopicId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-create-reviewed-answers", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Answers" => Answers), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + batch_delete_topic_reviewed_answer(aws_account_id, topic_id) + batch_delete_topic_reviewed_answer(aws_account_id, topic_id, params::Dict{String,<:Any}) + +Deletes reviewed answers for Q Topic. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that you want to delete a + reviewed answers in. +- `topic_id`: The ID for the topic reviewed answer that you want to delete. This ID is + unique per Amazon Web Services Region for each Amazon Web Services account. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AnswerIds"`: The Answer IDs of the Answers to be deleted. 
+""" +function batch_delete_topic_reviewed_answer( + AwsAccountId, TopicId; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-delete-reviewed-answers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function batch_delete_topic_reviewed_answer( + AwsAccountId, + TopicId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "POST", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/batch-delete-reviewed-answers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ cancel_ingestion(aws_account_id, data_set_id, ingestion_id) cancel_ingestion(aws_account_id, data_set_id, ingestion_id, params::Dict{String,<:Any}) @@ -5643,6 +5725,44 @@ function list_topic_refresh_schedules( ) end +""" + list_topic_reviewed_answers(aws_account_id, topic_id) + list_topic_reviewed_answers(aws_account_id, topic_id, params::Dict{String,<:Any}) + +Lists all reviewed answers for a Q Topic. + +# Arguments +- `aws_account_id`: The ID of the Amazon Web Services account that containd the reviewed + answers that you want listed. +- `topic_id`: The ID for the topic that contains the reviewed answer that you want to list. + This ID is unique per Amazon Web Services Region for each Amazon Web Services account. + +""" +function list_topic_reviewed_answers( + AwsAccountId, TopicId; aws_config::AbstractAWSConfig=global_aws_config() +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/reviewed-answers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_topic_reviewed_answers( + AwsAccountId, + TopicId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return quicksight( + "GET", + "/accounts/$(AwsAccountId)/topics/$(TopicId)/reviewed-answers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_topics(aws_account_id) list_topics(aws_account_id, params::Dict{String,<:Any}) diff --git a/src/services/rds.jl b/src/services/rds.jl index aaddc12b25..0593bda179 100644 --- a/src/services/rds.jl +++ b/src/services/rds.jl @@ -1146,7 +1146,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"DBSubnetGroupName"`: A DB subnet group to associate with this DB cluster. This setting is required to create a Multi-AZ DB cluster. Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters Constraints: Must match the name of an existing DB subnet group. - Must not be default. Example: mydbsubnetgroup + Example: mydbsubnetgroup - `"DBSystemId"`: Reserved for future use. - `"DatabaseName"`: The name for your database of up to 64 alphanumeric characters. A database named postgres is always created. If this parameter is specified, an additional @@ -1365,22 +1365,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys Tue | Wed | Thu | Fri | Sat | Sun. Must be in Universal Coordinated Time (UTC). Must be at least 30 minutes. - `"PubliclyAccessible"`: Specifies whether the DB cluster is publicly accessible. When the - DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to the - private IP address from within the DB cluster's virtual private cloud (VPC). It resolves to - the public IP address from outside of the DB cluster's VPC. Access to the DB cluster is - ultimately controlled by the security group it uses. 
That public access isn't permitted if - the security group assigned to the DB cluster doesn't permit it. When the DB cluster isn't - publicly accessible, it is an internal DB cluster with a DNS name that resolves to a - private IP address. Valid for Cluster Type: Multi-AZ DB clusters only Default: The default - behavior varies depending on whether DBSubnetGroupName is specified. If DBSubnetGroupName - isn't specified, and PubliclyAccessible isn't specified, the following applies: If the - default VPC in the target Region doesn’t have an internet gateway attached to it, the DB - cluster is private. If the default VPC in the target Region has an internet gateway - attached to it, the DB cluster is public. If DBSubnetGroupName is specified, and - PubliclyAccessible isn't specified, the following applies: If the subnets are part of a - VPC that doesn’t have an internet gateway attached to it, the DB cluster is private. If - the subnets are part of a VPC that has an internet gateway attached to it, the DB cluster - is public. + DB cluster is publicly accessible and you connect from outside of the DB cluster's virtual + private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public IP + address. When you connect from within the same VPC as the DB cluster, the endpoint resolves + to the private IP address. Access to the DB cluster is ultimately controlled by the + security group it uses. That public access isn't permitted if the security group assigned + to the DB cluster doesn't permit it. When the DB cluster isn't publicly accessible, it is + an internal DB cluster with a DNS name that resolves to a private IP address. Valid for + Cluster Type: Multi-AZ DB clusters only Default: The default behavior varies depending on + whether DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and + PubliclyAccessible isn't specified, the following applies: If the default VPC in the + target Region doesn’t have an internet gateway attached to it, the DB cluster is private. + If the default VPC in the target Region has an internet gateway attached to it, the DB + cluster is public. If DBSubnetGroupName is specified, and PubliclyAccessible isn't + specified, the following applies: If the subnets are part of a VPC that doesn’t have an + internet gateway attached to it, the DB cluster is private. If the subnets are part of a + VPC that has an internet gateway attached to it, the DB cluster is public. - `"RdsCustomClusterConfiguration"`: Reserved for future use. - `"ReplicationSourceIdentifier"`: The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a read replica. Valid for Cluster Type: @@ -2049,21 +2049,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. Default: 1 Valid Values: 0 - 15 - `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When - the DB instance is publicly accessible, its Domain Name System (DNS) endpoint resolves to - the private IP address from within the DB instance's virtual private cloud (VPC). It - resolves to the public IP address from outside of the DB instance's VPC. Access to the DB - instance is ultimately controlled by the security group it uses. That public access is not - permitted if the security group assigned to the DB instance doesn't permit it. 
When the DB - instance isn't publicly accessible, it is an internal DB instance with a DNS name that - resolves to a private IP address. Default: The default behavior varies depending on whether - DBSubnetGroupName is specified. If DBSubnetGroupName isn't specified, and - PubliclyAccessible isn't specified, the following applies: If the default VPC in the - target Region doesn’t have an internet gateway attached to it, the DB instance is - private. If the default VPC in the target Region has an internet gateway attached to it, - the DB instance is public. If DBSubnetGroupName is specified, and PubliclyAccessible - isn't specified, the following applies: If the subnets are part of a VPC that doesn’t - have an internet gateway attached to it, the DB instance is private. If the subnets are - part of a VPC that has an internet gateway attached to it, the DB instance is public. + the DB instance is publicly accessible and you connect from outside of the DB instance's + virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public + IP address. When you connect from within the same VPC as the DB instance, the endpoint + resolves to the private IP address. Access to the DB instance is ultimately controlled by + the security group it uses. That public access is not permitted if the security group + assigned to the DB instance doesn't permit it. When the DB instance isn't publicly + accessible, it is an internal DB instance with a DNS name that resolves to a private IP + address. Default: The default behavior varies depending on whether DBSubnetGroupName is + specified. If DBSubnetGroupName isn't specified, and PubliclyAccessible isn't specified, + the following applies: If the default VPC in the target Region doesn’t have an internet + gateway attached to it, the DB instance is private. If the default VPC in the target + Region has an internet gateway attached to it, the DB instance is public. If + DBSubnetGroupName is specified, and PubliclyAccessible isn't specified, the following + applies: If the subnets are part of a VPC that doesn’t have an internet gateway + attached to it, the DB instance is private. If the subnets are part of a VPC that has an + internet gateway attached to it, the DB instance is public. - `"StorageEncrypted"`: Specifes whether the DB instance is encrypted. By default, it isn't encrypted. For RDS Custom DB instances, either enable this setting or leave it unset. Otherwise, Amazon RDS reports an error. This setting doesn't apply to Amazon Aurora DB @@ -2757,6 +2758,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - Creates a primary DB instance and a standby instance in a different Availability Zone (AZ) for each physical shard. 2 - Creates a primary DB instance and two standby instances in different AZs for each physical shard. +- `"MinACU"`: The minimum capacity of the DB shard group in Aurora capacity units (ACUs). - `"PubliclyAccessible"`: Specifies whether the DB shard group is publicly accessible. When the DB shard group is publicly accessible, its Domain Name System (DNS) endpoint resolves to the private IP address from within the DB shard group's virtual private cloud (VPC). It @@ -3432,16 +3434,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys after the DB cluster is deleted. This parameter isn't case-sensitive. The default is to remove automated backups immediately after the DB cluster is deleted. 
- `"FinalDBSnapshotIdentifier"`: The DB cluster snapshot identifier of the new DB cluster - snapshot created when SkipFinalSnapshot is disabled. Specifying this parameter and also - skipping the creation of a final DB cluster snapshot with the SkipFinalShapshot parameter - results in an error. Constraints: Must be 1 to 255 letters, numbers, or hyphens. First - character must be a letter Can't end with a hyphen or contain two consecutive hyphens + snapshot created when SkipFinalSnapshot is disabled. If you specify this parameter and + also skip the creation of a final DB cluster snapshot with the SkipFinalShapshot parameter, + the request results in an error. Constraints: Must be 1 to 255 letters, numbers, or + hyphens. First character must be a letter Can't end with a hyphen or contain two + consecutive hyphens - `"SkipFinalSnapshot"`: Specifies whether to skip the creation of a final DB cluster - snapshot before the DB cluster is deleted. If skip is specified, no DB cluster snapshot is - created. If skip isn't specified, a DB cluster snapshot is created before the DB cluster is - deleted. By default, skip isn't specified, and the DB cluster snapshot is created. By - default, this parameter is disabled. You must specify a FinalDBSnapshotIdentifier - parameter if SkipFinalSnapshot is disabled. + snapshot before RDS deletes the DB cluster. If you set this value to true, RDS doesn't + create a final DB cluster snapshot. If you set this value to false or don't specify it, RDS + creates a DB cluster snapshot before it deletes the DB cluster. By default, this parameter + is disabled, so RDS creates a final DB cluster snapshot. If SkipFinalSnapshot is disabled, + you must specify a value for the FinalDBSnapshotIdentifier parameter. """ function delete_dbcluster( DBClusterIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -6204,7 +6207,11 @@ end describe_pending_maintenance_actions(params::Dict{String,<:Any}) Returns a list of resources (for example, DB instances) that have at least one pending -maintenance action. +maintenance action. This API follows an eventual consistency model. This means that the +result of the DescribePendingMaintenanceActions command might not be immediately visible to +all subsequent RDS commands. Keep this in mind when you use +DescribePendingMaintenanceActions immediately after using a previous API command such as +ApplyPendingMaintenanceActions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -7872,16 +7879,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys see Fault Tolerance for an Aurora DB Cluster in the Amazon Aurora User Guide. This setting doesn't apply to RDS Custom DB instances. Default: 1 Valid Values: 0 - 15 - `"PubliclyAccessible"`: Specifies whether the DB instance is publicly accessible. When - the DB cluster is publicly accessible, its Domain Name System (DNS) endpoint resolves to - the private IP address from within the DB cluster's virtual private cloud (VPC). It - resolves to the public IP address from outside of the DB cluster's VPC. Access to the DB - cluster is ultimately controlled by the security group it uses. That public access isn't - permitted if the security group assigned to the DB cluster doesn't permit it. When the DB - instance isn't publicly accessible, it is an internal DB instance with a DNS name that - resolves to a private IP address. PubliclyAccessible only applies to DB instances in a - VPC. 
The DB instance must be part of a public subnet and PubliclyAccessible must be enabled - for it to be publicly accessible. Changes to the PubliclyAccessible parameter are applied - immediately regardless of the value of the ApplyImmediately parameter. + the DB instance is publicly accessible and you connect from outside of the DB instance's + virtual private cloud (VPC), its Domain Name System (DNS) endpoint resolves to the public + IP address. When you connect from within the same VPC as the DB instance, the endpoint + resolves to the private IP address. Access to the DB instance is ultimately controlled by + the security group it uses. That public access isn't permitted if the security group + assigned to the DB instance doesn't permit it. When the DB instance isn't publicly + accessible, it is an internal DB instance with a DNS name that resolves to a private IP + address. PubliclyAccessible only applies to DB instances in a VPC. The DB instance must be + part of a public subnet and PubliclyAccessible must be enabled for it to be publicly + accessible. Changes to the PubliclyAccessible parameter are applied immediately regardless + of the value of the ApplyImmediately parameter. - `"ReplicaMode"`: A value that sets the open mode of a replica database to either mounted or read-only. Currently, this parameter is only supported for Oracle DB instances. Mounted DB replicas are included in Oracle Enterprise Edition. The main use case for @@ -8252,6 +8260,7 @@ more settings by specifying these parameters and the new values in the request. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"MaxACU"`: The maximum capacity of the DB shard group in Aurora capacity units (ACUs). +- `"MinACU"`: The minimum capacity of the DB shard group in Aurora capacity units (ACUs). """ function modify_dbshard_group( DBShardGroupIdentifier; aws_config::AbstractAWSConfig=global_aws_config() @@ -11051,11 +11060,10 @@ end Starts an export of DB snapshot or DB cluster data to Amazon S3. The provided IAM role must have access to the S3 bucket. You can't export snapshot data from Db2 or RDS Custom DB -instances. You can't export cluster data from Multi-AZ DB clusters. For more information on -exporting DB snapshot data, see Exporting DB snapshot data to Amazon S3 in the Amazon RDS -User Guide or Exporting DB cluster snapshot data to Amazon S3 in the Amazon Aurora User -Guide. For more information on exporting DB cluster data, see Exporting DB cluster data to -Amazon S3 in the Amazon Aurora User Guide. +instances. For more information on exporting DB snapshot data, see Exporting DB snapshot +data to Amazon S3 in the Amazon RDS User Guide or Exporting DB cluster snapshot data to +Amazon S3 in the Amazon Aurora User Guide. For more information on exporting DB cluster +data, see Exporting DB cluster data to Amazon S3 in the Amazon Aurora User Guide. # Arguments - `export_task_identifier`: A unique identifier for the export task. 
This ID isn't an
diff --git a/src/services/redshift_data.jl b/src/services/redshift_data.jl
index cad86c4447..033d46cc9b 100644
--- a/src/services/redshift_data.jl
+++ b/src/services/redshift_data.jl
@@ -5,8 +5,8 @@ using AWS.Compat
 using AWS.UUIDs
 
 """
-    batch_execute_statement(database, sqls)
-    batch_execute_statement(database, sqls, params::Dict{String,<:Any})
+    batch_execute_statement(sqls)
+    batch_execute_statement(sqls, params::Dict{String,<:Any})
 
 Runs one or more SQL statements, which can be data manipulation language (DML) or data
 definition language (DDL). Depending on the authorization method, use one of the following
@@ -31,8 +31,6 @@ is required. For more information about the Amazon Redshift Data API and CLI
 examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide.
 
 # Arguments
-- `database`: The name of the database. This parameter is required when authenticating
-  using either Secrets Manager or temporary credentials.
 - `sqls`: One or more SQL statements to run. The SQL statements are run as a single
   transaction. They run serially in the order of the array. Subsequent SQL statements don't
   start until the previous statement in the array completes. If any SQL statement fails, then
@@ -44,10 +42,16 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   idempotency of the request.
 - `"ClusterIdentifier"`: The cluster identifier. This parameter is required when connecting
   to a cluster and authenticating using either Secrets Manager or temporary credentials.
+- `"Database"`: The name of the database. This parameter is required when authenticating
+  using either Secrets Manager or temporary credentials.
 - `"DbUser"`: The database user name. This parameter is required when connecting to a
   cluster as a database user and authenticating using temporary credentials.
 - `"SecretArn"`: The name or ARN of the secret that enables access to the database. This
   parameter is required when authenticating using Secrets Manager.
+- `"SessionId"`: The session identifier of the query.
+- `"SessionKeepAliveSeconds"`: The number of seconds to keep the session alive after the
+  query finishes. The maximum time a session can be kept alive is 24 hours. After 24 hours,
+  the session is forcibly closed and the query is terminated.
 - `"StatementName"`: The name of the SQL statements. You can name the SQL statements when
   you create them to identify the query.
 - `"WithEvent"`: A value that indicates whether to send an event to the Amazon EventBridge
@@ -56,32 +60,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
   parameter is required when connecting to a serverless workgroup and authenticating using
   either Secrets Manager or temporary credentials.
""" -function batch_execute_statement( - Database, Sqls; aws_config::AbstractAWSConfig=global_aws_config() -) +function batch_execute_statement(Sqls; aws_config::AbstractAWSConfig=global_aws_config()) return redshift_data( "BatchExecuteStatement", - Dict{String,Any}( - "Database" => Database, "Sqls" => Sqls, "ClientToken" => string(uuid4()) - ); + Dict{String,Any}("Sqls" => Sqls, "ClientToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function batch_execute_statement( - Database, - Sqls, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + Sqls, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return redshift_data( "BatchExecuteStatement", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "Database" => Database, "Sqls" => Sqls, "ClientToken" => string(uuid4()) - ), + Dict{String,Any}("Sqls" => Sqls, "ClientToken" => string(uuid4())), params, ), ); @@ -242,8 +237,8 @@ function describe_table( end """ - execute_statement(database, sql) - execute_statement(database, sql, params::Dict{String,<:Any}) + execute_statement(sql) + execute_statement(sql, params::Dict{String,<:Any}) Runs an SQL statement, which can be data manipulation language (DML) or data definition language (DDL). This statement must be a single SQL statement. Depending on the @@ -269,8 +264,6 @@ is required. For more information about the Amazon Redshift Data API and CLI examples, see Using the Amazon Redshift Data API in the Amazon Redshift Management Guide. # Arguments -- `database`: The name of the database. This parameter is required when authenticating - using either Secrets Manager or temporary credentials. - `sql`: The SQL statement text to run. # Optional Parameters @@ -279,11 +272,17 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys idempotency of the request. - `"ClusterIdentifier"`: The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials. +- `"Database"`: The name of the database. This parameter is required when authenticating + using either Secrets Manager or temporary credentials. - `"DbUser"`: The database user name. This parameter is required when connecting to a cluster as a database user and authenticating using temporary credentials. - `"Parameters"`: The parameters for the SQL statement. - `"SecretArn"`: The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager. +- `"SessionId"`: The session identifier of the query. +- `"SessionKeepAliveSeconds"`: The number of seconds to keep the session alive after the + query finishes. The maximum time a session can keep alive is 24 hours. After 24 hours, the + session is forced closed and the query is terminated. - `"StatementName"`: The name of the SQL statement. You can name the SQL statement when you create it to identify the query. - `"WithEvent"`: A value that indicates whether to send an event to the Amazon EventBridge @@ -292,30 +291,23 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys parameter is required when connecting to a serverless workgroup and authenticating using either Secrets Manager or temporary credentials. 
""" -function execute_statement(Database, Sql; aws_config::AbstractAWSConfig=global_aws_config()) +function execute_statement(Sql; aws_config::AbstractAWSConfig=global_aws_config()) return redshift_data( "ExecuteStatement", - Dict{String,Any}( - "Database" => Database, "Sql" => Sql, "ClientToken" => string(uuid4()) - ); + Dict{String,Any}("Sql" => Sql, "ClientToken" => string(uuid4())); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function execute_statement( - Database, - Sql, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + Sql, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return redshift_data( "ExecuteStatement", Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "Database" => Database, "Sql" => Sql, "ClientToken" => string(uuid4()) - ), + Dict{String,Any}("Sql" => Sql, "ClientToken" => string(uuid4())), params, ), ); diff --git a/src/services/redshift_serverless.jl b/src/services/redshift_serverless.jl index a322e899d4..0fb80814d1 100644 --- a/src/services/redshift_serverless.jl +++ b/src/services/redshift_serverless.jl @@ -245,8 +245,8 @@ operation. action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots. (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, - see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster - Management Guide + see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management + Guide - `schedule`: The schedule for a one-time (at timestamp format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour. Times are in UTC. Format of at timestamp is yyyy-mm-ddThh:mm:ss. For example, 2016-03-04T17:27:00. @@ -501,6 +501,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"enhancedVpcRouting"`: The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC instead of over the internet. +- `"ipAddressType"`: The IP address type that the workgroup supports. Possible values are + ipv4 and dualstack. - `"maxCapacity"`: The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs. - `"port"`: The custom port to use when connecting to a workgroup. Valid port ranges are @@ -2201,8 +2203,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys action. This IAM role must allow the Amazon Redshift scheduler to schedule creating snapshots (Principal scheduler.redshift.amazonaws.com) to assume permissions on your behalf. For more information about the IAM role to use with the Amazon Redshift scheduler, - see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Cluster - Management Guide + see Using Identity-Based Policies for Amazon Redshift in the Amazon Redshift Management + Guide - `"schedule"`: The schedule for a one-time (at timestamp format) or recurring (cron format) scheduled action. Schedule invocations must be separated by at least one hour. Times are in UTC. Format of at timestamp is yyyy-mm-ddThh:mm:ss. For example, @@ -2389,6 +2391,8 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys - `"enhancedVpcRouting"`: The value that specifies whether to turn on enhanced virtual private cloud (VPC) routing, which forces Amazon Redshift Serverless to route traffic through your VPC. +- `"ipAddressType"`: The IP address type that the workgroup supports. Possible values are + ipv4 and dualstack. - `"maxCapacity"`: The maximum data-warehouse capacity Amazon Redshift Serverless uses to serve queries. The max capacity is specified in RPUs. - `"port"`: The custom port to use when connecting to a workgroup. Valid port ranges are diff --git a/src/services/rekognition.jl b/src/services/rekognition.jl index ea1220c2fc..cc561675d6 100644 --- a/src/services/rekognition.jl +++ b/src/services/rekognition.jl @@ -349,6 +349,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys existing dataset or specify the Amazon S3 bucket location of an Amazon Sagemaker format manifest file. If you don't specify datasetSource, an empty dataset is created. To add labeled images to the dataset, You can use the console or call UpdateDatasetEntries. +- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the dataset. """ function create_dataset( DatasetType, ProjectArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -441,6 +442,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys for Content Moderation. Applicable only to adapters. - `"Feature"`: Specifies feature that is being customized. If no value is provided CUSTOM_LABELS is used as a default. +- `"Tags"`: A set of tags (key-value pairs) that you want to attach to the project. """ function create_project(ProjectName; aws_config::AbstractAWSConfig=global_aws_config()) return rekognition( @@ -2148,7 +2150,9 @@ in the sample seen below. Use MaxResults parameter to limit the number of label If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetlabelDetection and populate the NextToken request -parameter with the token value returned from the previous call to GetLabelDetection. +parameter with the token value returned from the previous call to GetLabelDetection. If you +are retrieving results while using the Amazon Simple Notification Service, note that you +will receive an \"ERROR\" notification if the job encounters an issue. # Arguments - `job_id`: Job identifier for the label detection operation for which you want results diff --git a/src/services/resiliencehub.jl b/src/services/resiliencehub.jl index 55f64d15ed..1dd955516c 100644 --- a/src/services/resiliencehub.jl +++ b/src/services/resiliencehub.jl @@ -4,6 +4,52 @@ using AWS.AWSServices: resiliencehub using AWS.Compat using AWS.UUIDs +""" + accept_resource_grouping_recommendations(app_arn, entries) + accept_resource_grouping_recommendations(app_arn, entries, params::Dict{String,<:Any}) + +Accepts the resource grouping recommendations suggested by Resilience Hub for your +application. + +# Arguments +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. +- `entries`: Indicates the list of resource grouping recommendations you want to include in + your application. 
+ +""" +function accept_resource_grouping_recommendations( + appArn, entries; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/accept-resource-grouping-recommendations", + Dict{String,Any}("appArn" => appArn, "entries" => entries); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function accept_resource_grouping_recommendations( + appArn, + entries, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return resiliencehub( + "POST", + "/accept-resource-grouping-recommendations", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("appArn" => appArn, "entries" => entries), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ add_draft_app_version_resource_mappings(app_arn, resource_mappings) add_draft_app_version_resource_mappings(app_arn, resource_mappings, params::Dict{String,<:Any}) @@ -11,7 +57,7 @@ using AWS.UUIDs Adds the source of resource-maps to the draft version of an application. During assessment, Resilience Hub will use these resource-maps to resolve the latest physical ID for each resource in the application template. For more information about different types of -resources suported by Resilience Hub and how to add them in your application, see Step 2: +resources supported by Resilience Hub and how to add them in your application, see Step 2: How is your application managed? in the Resilience Hub User Guide. # Arguments @@ -1017,7 +1063,7 @@ end describe_app_version_resource(app_arn, app_version, params::Dict{String,<:Any}) Describes a resource of the Resilience Hub application. This API accepts only one of the -following parameters to descibe the resource: resourceName logicalResourceId +following parameters to describe the resource: resourceName logicalResourceId physicalResourceId (Along with physicalResourceId, you can also provide awsAccountId, and awsRegion) @@ -1245,6 +1291,46 @@ function describe_resiliency_policy( ) end +""" + describe_resource_grouping_recommendation_task(app_arn) + describe_resource_grouping_recommendation_task(app_arn, params::Dict{String,<:Any}) + +Describes the resource grouping recommendation tasks run by Resilience Hub for your +application. + +# Arguments +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"groupingId"`: Indicates the identifier of the grouping recommendation task. 
+""" +function describe_resource_grouping_recommendation_task( + appArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/describe-resource-grouping-recommendation-task", + Dict{String,Any}("appArn" => appArn); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_resource_grouping_recommendation_task( + appArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/describe-resource-grouping-recommendation-task", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("appArn" => appArn), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ import_resources_to_draft_app_version(app_arn) import_resources_to_draft_app_version(app_arn, params::Dict{String,<:Any}) @@ -1351,9 +1437,8 @@ List of compliance drifts that were detected while running an assessment. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: -- `"maxResults"`: Indicates the maximum number of applications requested. -- `"nextToken"`: Indicates the unique token number of the next application to be checked - for compliance and regulatory requirements from the list of applications. +- `"maxResults"`: Indicates the maximum number of compliance drifts requested. +- `"nextToken"`: Null, or the token from a previous call to get the next set of results. """ function list_app_assessment_compliance_drifts( assessmentArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1842,7 +1927,7 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"nextToken"`: Null, or the token from a previous call to get the next set of results. - `"reverseOrder"`: The application list is sorted based on the values of lastAppComplianceEvaluationTime field. By default, application list is sorted in ascending - order. To sort the appliation list in descending order, set this field to True. + order. To sort the application list in descending order, set this field to True. - `"toLastAssessmentTime"`: Indicates the upper limit of the range that is used to filter the applications based on their last assessment times. """ @@ -1936,6 +2021,45 @@ function list_resiliency_policies( ) end +""" + list_resource_grouping_recommendations() + list_resource_grouping_recommendations(params::Dict{String,<:Any}) + +Lists the resource grouping recommendations suggested by Resilience Hub for your +application. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"appArn"`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. +- `"maxResults"`: Maximum number of grouping recommendations to be displayed per Resilience + Hub application. +- `"nextToken"`: Null, or the token from a previous call to get the next set of results. 
+""" +function list_resource_grouping_recommendations(; + aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "GET", + "/list-resource-grouping-recommendations"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_resource_grouping_recommendations( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "GET", + "/list-resource-grouping-recommendations", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_sop_recommendations(assessment_arn) list_sop_recommendations(assessment_arn, params::Dict{String,<:Any}) @@ -2288,6 +2412,51 @@ function put_draft_app_version_template( ) end +""" + reject_resource_grouping_recommendations(app_arn, entries) + reject_resource_grouping_recommendations(app_arn, entries, params::Dict{String,<:Any}) + +Rejects resource grouping recommendations. + +# Arguments +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. +- `entries`: Indicates the list of resource grouping recommendations you have selected to + exclude from your application. + +""" +function reject_resource_grouping_recommendations( + appArn, entries; aws_config::AbstractAWSConfig=global_aws_config() +) + return resiliencehub( + "POST", + "/reject-resource-grouping-recommendations", + Dict{String,Any}("appArn" => appArn, "entries" => entries); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function reject_resource_grouping_recommendations( + appArn, + entries, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return resiliencehub( + "POST", + "/reject-resource-grouping-recommendations", + Dict{String,Any}( + mergewith( + _merge, Dict{String,Any}("appArn" => appArn, "entries" => entries), params + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ remove_draft_app_version_resource_mappings(app_arn) remove_draft_app_version_resource_mappings(app_arn, params::Dict{String,<:Any}) @@ -2450,6 +2619,42 @@ function start_app_assessment( ) end +""" + start_resource_grouping_recommendation_task(app_arn) + start_resource_grouping_recommendation_task(app_arn, params::Dict{String,<:Any}) + +Starts grouping recommendation task. + +# Arguments +- `app_arn`: Amazon Resource Name (ARN) of the Resilience Hub application. The format for + this ARN is: arn:partition:resiliencehub:region:account:app/app-id. For more information + about ARNs, see Amazon Resource Names (ARNs) in the Amazon Web Services General Reference + guide. 
+
+"""
+function start_resource_grouping_recommendation_task(
+    appArn; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return resiliencehub(
+        "POST",
+        "/start-resource-grouping-recommendation-task",
+        Dict{String,Any}("appArn" => appArn);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function start_resource_grouping_recommendation_task(
+    appArn, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return resiliencehub(
+        "POST",
+        "/start-resource-grouping-recommendation-task",
+        Dict{String,Any}(mergewith(_merge, Dict{String,Any}("appArn" => appArn), params));
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
 """
     tag_resource(resource_arn, tags)
     tag_resource(resource_arn, tags, params::Dict{String,<:Any})
diff --git a/src/services/rolesanywhere.jl b/src/services/rolesanywhere.jl
index 25794f50c5..62602676fa 100644
--- a/src/services/rolesanywhere.jl
+++ b/src/services/rolesanywhere.jl
@@ -19,6 +19,8 @@ You use profiles to intersect permissions with IAM managed policies. Required p
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"acceptRoleSessionName"`: Used to determine if a custom role session name will be
+  accepted in a temporary credential request.
 - `"durationSeconds"`: Used to determine how long sessions vended using this profile are
   valid for. See the Expiration section of the CreateSession API documentation page for more
   details. In requests, if this value is not provided, the default value will be 3600.
@@ -1071,6 +1073,8 @@ permissions: rolesanywhere:UpdateProfile.
 
 # Optional Parameters
 Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"acceptRoleSessionName"`: Used to determine if a custom role session name will be
+  accepted in a temporary credential request.
 - `"durationSeconds"`: Used to determine how long sessions vended using this profile are
   valid for. See the Expiration section of the CreateSession API documentation page for more
   details. In requests, if this value is not provided, the default value will be 3600.
diff --git a/src/services/s3.jl b/src/services/s3.jl
index d9db5a2659..83894399c7 100644
--- a/src/services/s3.jl
+++ b/src/services/s3.jl
@@ -15,12 +15,16 @@ progress, those part uploads might or might not succeed. As a result, it might b
 to abort a given multipart upload multiple times in order to completely free all storage
 consumed by all parts. To verify that all parts have been removed and prevent getting
 charged for the part storage, you should call the ListParts API operation and ensure that
-the parts list is empty. Directory buckets - For directory buckets, you must make
-requests for this API operation to the Zonal endpoint. These endpoints support
-virtual-hosted-style requests in the format
+the parts list is empty. Directory buckets - If multipart uploads in a directory bucket
+are in progress, you can't delete the bucket until all the in-progress multipart uploads
+are aborted or completed. To delete these in-progress multipart uploads, use the
+ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and
+use the AbortMultipartUpload operation to abort all the in-progress multipart uploads.
+Directory buckets - For directory buckets, you must make requests for this API operation to
+the Zonal endpoint.
These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User -Guide. Permissions General purpose bucket permissions - For information about +Guide. Permissions General purpose bucket permissions - For information about permissions required to use the multipart upload, see Multipart Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API @@ -144,21 +148,23 @@ session token. With the session token in your request header, you can make API r this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session -expires. For more information about authorization, see CreateSession . Special errors - Error Code: EntityTooSmall Description: Your proposed upload is smaller than the -minimum allowed object size. Each part must be at least 5 MB in size, except the last part. - HTTP Status Code: 400 Bad Request Error Code: InvalidPart Description: One or more -of the specified parts could not be found. The part might not have been uploaded, or the -specified ETag might not have matched the uploaded part's ETag. HTTP Status Code: 400 Bad -Request Error Code: InvalidPartOrder Description: The list of parts was not in -ascending order. The parts list must be specified in order by part number. HTTP Status -Code: 400 Bad Request Error Code: NoSuchUpload Description: The specified multipart -upload does not exist. The upload ID might be invalid, or the multipart upload might have -been aborted or completed. HTTP Status Code: 404 Not Found HTTP Host header syntax -Directory buckets - The HTTP Host header syntax is -Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to -CompleteMultipartUpload: CreateMultipartUpload UploadPart AbortMultipartUpload - ListParts ListMultipartUploads +expires. For more information about authorization, see CreateSession . If you provide +an additional checksum value in your MultipartUpload requests and the object is encrypted +with Key Management Service, you must have permission to use the kms:Decrypt action for the +CompleteMultipartUpload request to succeed. Special errors Error Code: EntityTooSmall + Description: Your proposed upload is smaller than the minimum allowed object size. Each +part must be at least 5 MB in size, except the last part. HTTP Status Code: 400 Bad +Request Error Code: InvalidPart Description: One or more of the specified parts +could not be found. The part might not have been uploaded, or the specified ETag might not +have matched the uploaded part's ETag. HTTP Status Code: 400 Bad Request Error Code: +InvalidPartOrder Description: The list of parts was not in ascending order. The parts +list must be specified in order by part number. HTTP Status Code: 400 Bad Request +Error Code: NoSuchUpload Description: The specified multipart upload does not exist. The +upload ID might be invalid, or the multipart upload might have been aborted or completed. +HTTP Status Code: 404 Not Found HTTP Host header syntax Directory buckets - The +HTTP Host header syntax is Bucket_name.s3express-az_id.region.amazonaws.com. 
The +following operations are related to CompleteMultipartUpload: CreateMultipartUpload +UploadPart AbortMultipartUpload ListParts ListMultipartUploads # Arguments - `bucket`: Name of the bucket to which the multipart upload was initiated. Directory @@ -189,6 +195,13 @@ CompleteMultipartUpload: CreateMultipartUpload UploadPart AbortMultip # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"CompleteMultipartUpload"`: The container for the multipart upload request information. +- `"If-None-Match"`: Uploads the object only if the object key name does not already exist + in the bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. If a + conflicting operation occurs during the upload S3 returns a 409 ConditionalRequestConflict + response. On a 409 failure you should re-initiate the multipart upload with + CreateMultipartUpload and re-upload each part. Expects the '*' (asterisk) character. For + more information about conditional requests, see RFC 7232, or Conditional requests in the + Amazon S3 User Guide. - `"x-amz-checksum-crc32"`: This header can be used as a data integrity check to verify that the data received is the same data that was originally sent. This header specifies the base64-encoded, 32-bit CRC32 checksum of the object. For more information, see Checking @@ -262,61 +275,64 @@ a single atomic action using this API. However, to copy an object greater than 5 must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API. You can copy individual objects between general purpose buckets, between directory buckets, and between general -purpose buckets and directory buckets. Directory buckets - For directory buckets, you -must make requests for this API operation to the Zonal endpoint. These endpoints support -virtual-hosted-style requests in the format -https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are -not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User -Guide. Both the Region that you want to copy the object from and the Region that you want -to copy the object to must be enabled for your account. For more information about how to -enable a Region for your account, see Enable or disable a Region for standalone accounts in -the Amazon Web Services Account Management Guide. Amazon S3 transfer acceleration does not -support cross-Region copies. If you request a cross-Region copy using a transfer -acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer -Acceleration. Authentication and authorization All CopyObject requests must be -authenticated and signed by using IAM credentials (access key ID and secret access key for -the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must -be signed. For more information, see REST Authentication. Directory buckets - You must use -the IAM credentials to authenticate and authorize your access to the CopyObject API -operation, instead of using the temporary security credentials through the CreateSession -API operation. Amazon Web Services CLI or SDKs handles authentication and authorization on -your behalf. Permissions You must have read access to the source object and write access -to the destination bucket. 
General purpose bucket permissions - You must have -permissions in an IAM policy based on the source and destination bucket types in a -CopyObject operation. If the source object is in a general purpose bucket, you must have -s3:GetObject permission to read the source object that is being copied. If the -destination bucket is a general purpose bucket, you must have s3:PutObject permission to -write the object copy to the destination bucket. Directory bucket permissions - You -must have permissions in a bucket policy or an IAM identity-based policy based on the -source and destination bucket types in a CopyObject operation. If the source object that -you want to copy is in a directory bucket, you must have the s3express:CreateSession -permission in the Action element of a policy to read the object. By default, the session is -in the ReadWrite mode. If you want to restrict the access, you can explicitly set the -s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy -destination is a directory bucket, you must have the s3express:CreateSession permission -in the Action element of a policy to write the object to the destination. The -s3express:SessionMode condition key can't be set to ReadOnly on the copy destination -bucket. For example policies, see Example bucket policies for S3 Express One Zone and -Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 -Express One Zone in the Amazon S3 User Guide. Response and special errors When the -request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an -HTTP 1.1 request, the response would not contain the Content-Length. You always need to -read the entire response body to check if the copy succeeds. to keep the connection alive -while we copy the data. If the copy is successful, you receive a response with -information about the copied object. A copy request might return an error when Amazon S3 -receives the copy request or while Amazon S3 is copying the files. A 200 OK response can -contain either a success or an error. If the error occurs before the copy action starts, -you receive a standard Amazon S3 error. If the error occurs during the copy operation, -the error response is embedded in the 200 OK response. For example, in a cross-region copy, -you may encounter throttling and receive a 200 OK response. For more information, see -Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code -means the copy was accepted, but it doesn't mean the copy is complete. Another example is -when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the -copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the -entire response is successfully received and processed. If you call this API operation -directly, make sure to design your application to parse the content of the response and -handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. -The SDKs detect the embedded error and apply error handling per your configuration settings +purpose buckets and directory buckets. Amazon S3 supports copy operations using +Multi-Region Access Points only as a destination when using the Multi-Region Access Point +ARN. Directory buckets - For directory buckets, you must make requests for this API +operation to the Zonal endpoint. 
These endpoints support virtual-hosted-style requests in +the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style +requests are not supported. For more information, see Regional and Zonal endpoints in the +Amazon S3 User Guide. VPC endpoints don't support cross-Region requests (including +copies). If you're using VPC endpoints, your source and destination buckets should be in +the same Amazon Web Services Region as your VPC endpoint. Both the Region that you want +to copy the object from and the Region that you want to copy the object to must be enabled +for your account. For more information about how to enable a Region for your account, see +Enable or disable a Region for standalone accounts in the Amazon Web Services Account +Management Guide. Amazon S3 transfer acceleration does not support cross-Region copies. If +you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad +Request error. For more information, see Transfer Acceleration. Authentication and +authorization All CopyObject requests must be authenticated and signed by using IAM +credentials (access key ID and secret access key for the IAM identities). All headers with +the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see +REST Authentication. Directory buckets - You must use the IAM credentials to authenticate +and authorize your access to the CopyObject API operation, instead of using the temporary +security credentials through the CreateSession API operation. Amazon Web Services CLI or +SDKs handles authentication and authorization on your behalf. Permissions You must have +read access to the source object and write access to the destination bucket. General +purpose bucket permissions - You must have permissions in an IAM policy based on the source +and destination bucket types in a CopyObject operation. If the source object is in a +general purpose bucket, you must have s3:GetObject permission to read the source object +that is being copied. If the destination bucket is a general purpose bucket, you must +have s3:PutObject permission to write the object copy to the destination bucket. +Directory bucket permissions - You must have permissions in a bucket policy or an IAM +identity-based policy based on the source and destination bucket types in a CopyObject +operation. If the source object that you want to copy is in a directory bucket, you must +have the s3express:CreateSession permission in the Action element of a policy to read the +object. By default, the session is in the ReadWrite mode. If you want to restrict the +access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the +copy source bucket. If the copy destination is a directory bucket, you must have the +s3express:CreateSession permission in the Action element of a policy to write the object +to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the +copy destination bucket. For example policies, see Example bucket policies for S3 +Express One Zone and Amazon Web Services Identity and Access Management (IAM) +identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. Response +and special errors When the request is an HTTP 1.1 request, the response is chunk encoded. +When the request is not an HTTP 1.1 request, the response would not contain the +Content-Length. You always need to read the entire response body to check if the copy +succeeds. 
If the copy is successful, you receive a response with information about the +copied object. A copy request might return an error when Amazon S3 receives the copy +request or while Amazon S3 is copying the files. A 200 OK response can contain either a +success or an error. If the error occurs before the copy action starts, you receive a +standard Amazon S3 error. If the error occurs during the copy operation, the error +response is embedded in the 200 OK response. For example, in a cross-region copy, you may +encounter throttling and receive a 200 OK response. For more information, see Resolve the +Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy +was accepted, but it doesn't mean the copy is complete. Another example is when you +disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and +you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire +response is successfully received and processed. If you call this API operation directly, +make sure to design your application to parse the content of the response and handle it +appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs +detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error). Charge The copy request charge is based on the storage class and Region that @@ -809,48 +825,48 @@ regular requests. You initiate a multipart upload, send one or more requests to parts, and then complete the multipart upload process. You sign each request individually. There is nothing special about signing multipart upload requests. For more information about signing, see Authenticating Requests (Amazon Web Services Signature Version 4) in the -Amazon S3 User Guide. Permissions General purpose bucket permissions - For information -about the permissions required to use the multipart upload API, see Multipart upload and -permissions in the Amazon S3 User Guide. To perform a multipart upload with encryption by -using an Amazon Web Services KMS key, the requester must have permission to the kms:Decrypt -and kms:GenerateDataKey* actions on the key. These permissions are required because Amazon -S3 must decrypt and read data from the encrypted file parts before it completes the -multipart upload. For more information, see Multipart upload API and permissions and -Protecting data using server-side encryption with Amazon Web Services KMS in the Amazon S3 -User Guide. Directory bucket permissions - To grant access to this API operation on a -directory bucket, we recommend that you use the CreateSession API operation for -session-based authorization. Specifically, you grant the s3express:CreateSession permission -to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make -the CreateSession API call on the bucket to obtain a session token. With the session token -in your request header, you can make API requests to this operation. After the session -token expires, you make another CreateSession API call to generate a new session token for -use. Amazon Web Services CLI or SDKs create session and refresh the session token -automatically to avoid service interruptions when a session expires. For more information -about authorization, see CreateSession . 
Encryption General purpose buckets -
-Server-side encryption is for data encryption at rest. Amazon S3 encrypts your data as it
-writes it to disks in its data centers and decrypts it when you access it. Amazon S3
-automatically encrypts all new objects that are uploaded to an S3 bucket. When doing a
-multipart upload, if you don't specify encryption information in your request, the
-encryption setting of the uploaded parts is set to the default encryption configuration of
-the destination bucket. By default, all buckets have a base level of encryption
-configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). If the
-destination bucket has a default encryption configuration that uses server-side encryption
-with an Key Management Service (KMS) key (SSE-KMS), or a customer-provided encryption key
-(SSE-C), Amazon S3 uses the corresponding KMS key, or a customer-provided key to encrypt
-the uploaded parts. When you perform a CreateMultipartUpload operation, if you want to use
-a different type of encryption setting for the uploaded parts, you can request that Amazon
-S3 encrypts the object with a different encryption key (such as an Amazon S3 managed key, a
-KMS key, or a customer-provided key). When the encryption setting in your request is
-different from the default encryption configuration of the destination bucket, the
-encryption setting in your request takes precedence. If you choose to provide your own
-encryption key, the request headers you provide in UploadPart and UploadPartCopy requests
-must match the headers you used in the CreateMultipartUpload request. Use KMS keys
-(SSE-KMS) that include the Amazon Web Services managed key (aws/s3) and KMS customer
-managed keys stored in Key Management Service (KMS) – If you want Amazon Web Services to
-manage the keys used to encrypt data, specify the following headers in the request.
-x-amz-server-side-encryption x-amz-server-side-encryption-aws-kms-key-id
-x-amz-server-side-encryption-context If you specify
-x-amz-server-side-encryption:aws:kms, but don't provide
+Amazon S3 User Guide. Permissions General purpose bucket permissions - To perform a
+multipart upload with encryption using a Key Management Service (KMS) key, the
+requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the
+key. The requester must also have permissions for the kms:GenerateDataKey action for the
+CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action
+on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3
+must decrypt and read data from the encrypted file parts before it completes the multipart
+upload. For more information, see Multipart upload API and permissions and Protecting data
+using server-side encryption with Amazon Web Services KMS in the Amazon S3 User Guide.
+Directory bucket permissions - To grant access to this API operation on a directory bucket,
+we recommend that you use the CreateSession API operation for session-based
+authorization. Specifically, you grant the s3express:CreateSession permission to the
+directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the
+CreateSession API call on the bucket to obtain a session token. With the session token in
+your request header, you can make API requests to this operation. After the session token
+expires, you make another CreateSession API call to generate a new session token for use. 
+Amazon Web Services CLI or SDKs create session and refresh the session token automatically
+to avoid service interruptions when a session expires. For more information about
+authorization, see CreateSession . Encryption General purpose buckets - Server-side
+encryption is for data encryption at rest. Amazon S3 encrypts your data as it writes it to
+disks in its data centers and decrypts it when you access it. Amazon S3 automatically
+encrypts all new objects that are uploaded to an S3 bucket. When doing a multipart upload,
+if you don't specify encryption information in your request, the encryption setting of the
+uploaded parts is set to the default encryption configuration of the destination bucket. By
+default, all buckets have a base level of encryption configuration that uses server-side
+encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a default
+encryption configuration that uses server-side encryption with a Key Management Service
+(KMS) key (SSE-KMS), or a customer-provided encryption key (SSE-C), Amazon S3 uses the
+corresponding KMS key, or a customer-provided key to encrypt the uploaded parts. When you
+perform a CreateMultipartUpload operation, if you want to use a different type of
+encryption setting for the uploaded parts, you can request that Amazon S3 encrypts the
+object with a different encryption key (such as an Amazon S3 managed key, a KMS key, or a
+customer-provided key). When the encryption setting in your request is different from the
+default encryption configuration of the destination bucket, the encryption setting in your
+request takes precedence. If you choose to provide your own encryption key, the request
+headers you provide in UploadPart and UploadPartCopy requests must match the headers you
+used in the CreateMultipartUpload request. Use KMS keys (SSE-KMS) that include the Amazon
+Web Services managed key (aws/s3) and KMS customer managed keys stored in Key Management
+Service (KMS) – If you want Amazon Web Services to manage the keys used to encrypt data,
+specify the following headers in the request. x-amz-server-side-encryption
+x-amz-server-side-encryption-aws-kms-key-id x-amz-server-side-encryption-context
+If you specify x-amz-server-side-encryption:aws:kms, but don't provide
 x-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses the Amazon Web Services
managed key (aws/s3 key) in KMS to protect the data. To perform a multipart upload with
encryption by using an Amazon Web Services KMS key, the requester must have permission to
@@ -2012,7 +2028,7 @@ Permissions General purpose bucket permissions - The following permissions a
 in your policies when your DeleteObjects request includes specific headers.
s3:DeleteObject - To delete an object from a bucket, you must always specify the
s3:DeleteObject permission. s3:DeleteObjectVersion - To delete a specific version of
-an object from a versiong-enabled bucket, you must specify the s3:DeleteObjectVersion
+an object from a versioning-enabled bucket, you must specify the s3:DeleteObjectVersion
 permission. Directory bucket permissions - To grant access to this API operation on a
directory bucket, we recommend that you use the CreateSession API operation for
session-based authorization. Specifically, you grant the s3express:CreateSession permission
@@ -3363,7 +3379,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
 directory buckets. For this API operation, only the null value of the version ID is
supported by directory buckets. 
You can only specify null to the versionId query
parameter in the request. For more information about versioning, see PutBucketVersioning.
-- `"x-amz-checksum-mode"`: To retrieve the checksum, this mode must be enabled.
+- `"x-amz-checksum-mode"`: To retrieve the checksum, this mode must be enabled. In
+  addition, if you enable checksum mode and the object is uploaded with a checksum and
+  encrypted with a Key Management Service (KMS) key, you must have permission to use the
+  kms:Decrypt action to retrieve the checksum.
- `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the
  account ID that you provide does not match the actual owner of the bucket, the request
  fails with the HTTP status code 403 Forbidden (access denied).
@@ -3961,32 +3980,34 @@ end
 
 You can use this operation to determine if a bucket exists and if you have permission to
access it. The action returns a 200 OK if the bucket exists and you have permission to
-access it. If the bucket does not exist or you do not have permission to access it, the
+access it. If the bucket does not exist or you do not have permission to access it, the
 HEAD request returns a generic 400 Bad Request, 403 Forbidden or 404 Not Found code. A
message body is not included, so you cannot determine the exception beyond these HTTP
-response codes. Directory buckets - You must make requests for this API operation to the
-Zonal endpoint. These endpoints support virtual-hosted-style requests in the format
-https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests are not
-supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User
-Guide. Authentication and authorization All HeadBucket requests must be authenticated
-and signed by using IAM credentials (access key ID and secret access key for the IAM
-identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be
-signed. For more information, see REST Authentication. Directory bucket - You must use IAM
-credentials to authenticate and authorize your access to the HeadBucket API operation,
-instead of using the temporary security credentials through the CreateSession API
-operation. Amazon Web Services CLI or SDKs handles authentication and authorization on your
-behalf. Permissions General purpose bucket permissions - To use this operation, you
-must have permissions to perform the s3:ListBucket action. The bucket owner has this
-permission by default and can grant this permission to others. For more information about
-permissions, see Managing access permissions to your Amazon S3 resources in the Amazon S3
-User Guide. Directory bucket permissions - You must have the s3express:CreateSession
-permission in the Action element of a policy. By default, the session is in the ReadWrite
-mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode
-condition key to ReadOnly on the bucket. For more information about example bucket
-policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services
-Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the
-Amazon S3 User Guide. HTTP Host header syntax Directory buckets - The HTTP Host
-header syntax is Bucket_name.s3express-az_id.region.amazonaws.com.
+response codes. Authentication and authorization General purpose buckets - Requests to
+public buckets that grant the s3:ListBucket permission publicly do not need to be signed. 
+All other HeadBucket requests must be authenticated and signed by using IAM credentials +(access key ID and secret access key for the IAM identities). All headers with the x-amz- +prefix, including x-amz-copy-source, must be signed. For more information, see REST +Authentication. Directory buckets - You must use IAM credentials to authenticate and +authorize your access to the HeadBucket API operation, instead of using the temporary +security credentials through the CreateSession API operation. Amazon Web Services CLI or +SDKs handles authentication and authorization on your behalf. Permissions General +purpose bucket permissions - To use this operation, you must have permissions to perform +the s3:ListBucket action. The bucket owner has this permission by default and can grant +this permission to others. For more information about permissions, see Managing access +permissions to your Amazon S3 resources in the Amazon S3 User Guide. Directory bucket +permissions - You must have the s3express:CreateSession permission in the Action element +of a policy. By default, the session is in the ReadWrite mode. If you want to restrict the +access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the +bucket. For more information about example bucket policies, see Example bucket policies for +S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) +identity-based policies for S3 Express One Zone in the Amazon S3 User Guide. HTTP Host +header syntax Directory buckets - The HTTP Host header syntax is +Bucket_name.s3express-az_id.region.amazonaws.com. You must make requests for this API +operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in +the format https://bucket_name.s3express-az_id.region.amazonaws.com. Path-style requests +are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 +User Guide. # Arguments - `bucket`: The bucket name. Directory buckets - When you use this operation with a @@ -4037,27 +4058,23 @@ end head_object(bucket, key, params::Dict{String,<:Any}) The HEAD operation retrieves metadata from an object without returning the object itself. -This operation is useful if you're interested only in an object's metadata. A HEAD request +This operation is useful if you're interested only in an object's metadata. A HEAD request has the same options as a GET operation on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic code, such as 400 Bad Request, 403 Forbidden, 404 Not Found, 405 Method Not Allowed, 412 Precondition Failed, or 304 Not Modified. It's not -possible to retrieve the exact exception of these error codes. Request headers are limited -to 8 KB in size. For more information, see Common Request Headers. Directory buckets - -For directory buckets, you must make requests for this API operation to the Zonal endpoint. -These endpoints support virtual-hosted-style requests in the format -https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are -not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User -Guide. Permissions General purpose bucket permissions - To use HEAD, you must have -the s3:GetObject permission. You need the relevant read object (or version) permission for -this operation. 
For more information, see Actions, resources, and condition keys for Amazon -S3 in the Amazon S3 User Guide. If the object you request doesn't exist, the error that -Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you -have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 -Not Found error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an -HTTP status code 403 Forbidden error. Directory bucket permissions - To grant access -to this API operation on a directory bucket, we recommend that you use the CreateSession -API operation for session-based authorization. Specifically, you grant the +possible to retrieve the exact exception of these error codes. Request headers are limited +to 8 KB in size. For more information, see Common Request Headers. Permissions +General purpose bucket permissions - To use HEAD, you must have the s3:GetObject +permission. You need the relevant read object (or version) permission for this operation. +For more information, see Actions, resources, and condition keys for Amazon S3 in the +Amazon S3 User Guide. If the object you request doesn't exist, the error that Amazon S3 +returns depends on whether you also have the s3:ListBucket permission. If you have the +s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 Not Found +error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status +code 403 Forbidden error. Directory bucket permissions - To grant access to this API +operation on a directory bucket, we recommend that you use the CreateSession API +operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to @@ -4091,8 +4108,13 @@ buckets - S3 Versioning isn't enabled and supported for directory buckets. For t operation, only the null value of the version ID is supported by directory buckets. You can only specify null to the versionId query parameter in the request. HTTP Host header syntax Directory buckets - The HTTP Host header syntax is -Bucket_name.s3express-az_id.region.amazonaws.com. The following actions are related to -HeadObject: GetObject GetObjectAttributes +Bucket_name.s3express-az_id.region.amazonaws.com. For directory buckets, you must make +requests for this API operation to the Zonal endpoint. These endpoints support +virtual-hosted-style requests in the format +https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are +not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User +Guide. The following actions are related to HeadObject: GetObject +GetObjectAttributes # Arguments - `bucket`: The name of the bucket that contains the object. Directory buckets - When you @@ -4150,13 +4172,19 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"partNumber"`: Part number of the object being read. This is a positive integer between 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. Useful querying about the size of the part and the number of parts in this object. +- `"response-cache-control"`: Sets the Cache-Control header of the response. 
+- `"response-content-disposition"`: Sets the Content-Disposition header of the response.
+- `"response-content-encoding"`: Sets the Content-Encoding header of the response.
+- `"response-content-language"`: Sets the Content-Language header of the response.
+- `"response-content-type"`: Sets the Content-Type header of the response.
+- `"response-expires"`: Sets the Expires header of the response.
- `"versionId"`: Version ID used to reference a specific version of the object. For
  directory buckets in this API operation, only the null value of the version ID is
  supported.
- `"x-amz-checksum-mode"`: To retrieve the checksum, this parameter must be enabled. In
-  addition, if you enable ChecksumMode and the object is encrypted with Amazon Web Services
-  Key Management Service (Amazon Web Services KMS), you must have permission to use the
-  kms:Decrypt action for the request to succeed.
+  addition, if you enable checksum mode and the object is uploaded with a checksum and
+  encrypted with a Key Management Service (KMS) key, you must have permission to use the
+  kms:Decrypt action to retrieve the checksum.
- `"x-amz-expected-bucket-owner"`: The account ID of the expected bucket owner. If the
  account ID that you provide does not match the actual owner of the bucket, the request
  fails with the HTTP status code 403 Forbidden (access denied).
@@ -4420,6 +4448,15 @@ by the authenticated sender of the request. To use this operation, you must have
 s3:ListAllMyBuckets permission. For information about Amazon S3 buckets, see Creating,
configuring, and working with Amazon S3 buckets.
 
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"continuation-token"`: ContinuationToken indicates to Amazon S3 that the list is being
+  continued on this bucket with a token. ContinuationToken is obfuscated and is not a real
+  key. You can use this ContinuationToken for pagination of the list results. Length
+  Constraints: Minimum length of 0. Maximum length of 1024. Required: No.
+- `"max-buckets"`: Maximum number of buckets to be returned in response. When the number is
+  more than the count of buckets that are owned by an Amazon Web Services account, return all
+  the buckets in response.
 """
 function list_buckets(; aws_config::AbstractAWSConfig=global_aws_config())
     return s3("GET", "/"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET)
@@ -4452,8 +4489,9 @@ s3express-control.region.amazonaws.com.
 # Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
- `"continuation-token"`: ContinuationToken indicates to Amazon S3 that the list is being
-  continued on this bucket with a token. ContinuationToken is obfuscated and is not a real
-  key. You can use this ContinuationToken for pagination of the list results.
+  continued on buckets in this account with a token. ContinuationToken is obfuscated and is
+  not a real bucket name. You can use this ContinuationToken for the pagination of the list
+  results.
- `"max-directory-buckets"`: Maximum number of buckets to be returned in response. When the
  number is more than the count of buckets that are owned by an Amazon Web Services account,
  return all the buckets in response.
@@ -4475,30 +4513,32 @@ This operation lists in-progress multipart uploads in a bucket. An in-progress m
 upload is a multipart upload that has been initiated by the CreateMultipartUpload request,
but has not yet been completed or aborted. Directory buckets - If multipart uploads in a
directory bucket are in progress, you can't delete the bucket until all the in-progress
-multipart uploads are aborted or completed. The ListMultipartUploads operation returns a
-maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is
-also the default value. You can further limit the number of uploads in a response by
-specifying the max-uploads request parameter. If there are more than 1,000 multipart
-uploads that satisfy your ListMultipartUploads request, the response returns an IsTruncated
-element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element.
-To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads
-requests. In these requests, include two query parameters: key-marker and upload-id-marker.
-Set the value of key-marker to the NextKeyMarker value from the previous response.
-Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the
-previous response. Directory buckets - The upload-id-marker element and the
-NextUploadIdMarker element aren't supported by directory buckets. To list the additional
-multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value
-from the previous response. For more information about multipart uploads, see Uploading
-Objects Using Multipart Upload in the Amazon S3 User Guide. Directory buckets - For
-directory buckets, you must make requests for this API operation to the Zonal endpoint.
-These endpoints support virtual-hosted-style requests in the format
-https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are
-not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User
-Guide. Permissions General purpose bucket permissions - For information about
-permissions required to use the multipart upload API, see Multipart Upload and Permissions
-in the Amazon S3 User Guide. Directory bucket permissions - To grant access to this API
-operation on a directory bucket, we recommend that you use the CreateSession API
-operation for session-based authorization. Specifically, you grant the
+multipart uploads are aborted or completed. To delete these in-progress multipart uploads,
+use the ListMultipartUploads operation to list the in-progress multipart uploads in the
+bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart
+uploads. The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads
+in the response. The limit of 1,000 multipart uploads is also the default value. You can
+further limit the number of uploads in a response by specifying the max-uploads request
+parameter. If there are more than 1,000 multipart uploads that satisfy your
+ListMultipartUploads request, the response returns an IsTruncated element with the value of
+true, a NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining
+multipart uploads, you need to make subsequent ListMultipartUploads requests. In these
+requests, include two query parameters: key-marker and upload-id-marker. Set the value of
+key-marker to the NextKeyMarker value from the previous response. Similarly, set the value
+of upload-id-marker to the NextUploadIdMarker value from the previous response. Directory
+buckets - The upload-id-marker element and the NextUploadIdMarker element aren't supported
+by directory buckets. 
To list the additional multipart uploads, you only need to set the +value of key-marker to the NextKeyMarker value from the previous response. For more +information about multipart uploads, see Uploading Objects Using Multipart Upload in the +Amazon S3 User Guide. Directory buckets - For directory buckets, you must make requests +for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style +requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . +Path-style requests are not supported. For more information, see Regional and Zonal +endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions +- For information about permissions required to use the multipart upload API, see Multipart +Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - To +grant access to this API operation on a directory bucket, we recommend that you use the +CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to @@ -4736,12 +4776,15 @@ the request parameters as selection criteria to return a subset of the objects i A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To -get a list of your buckets, see ListBuckets. Directory buckets - For directory buckets, -you must make requests for this API operation to the Zonal endpoint. These endpoints -support virtual-hosted-style requests in the format +get a list of your buckets, see ListBuckets. General purpose bucket - For general +purpose buckets, ListObjectsV2 doesn't return prefixes that are related only to in-progress +multipart uploads. Directory buckets - For directory buckets, ListObjectsV2 response +includes the prefixes that are related only to in-progress multipart uploads. Directory +buckets - For directory buckets, you must make requests for this API operation to the Zonal +endpoint. These endpoints support virtual-hosted-style requests in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name . Path-style requests are not supported. For more information, see Regional and Zonal endpoints in the Amazon S3 User -Guide. Permissions General purpose bucket permissions - To use this operation, you +Guide. Permissions General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket @@ -4800,9 +4843,15 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys CommonPrefixes response parameter contains the prefixes that are associated with the in-progress multipart uploads. For more information about multipart uploads, see Multipart Upload Overview in the Amazon S3 User Guide. -- `"encoding-type"`: Encoding type used by Amazon S3 to encode object keys in the response. 
- If using url, non-ASCII characters used in an object's key name will be URL encoded. For - example, the object test_file(3).png will appear as test_file%283%29.png. +- `"encoding-type"`: Encoding type used by Amazon S3 to encode the object keys in the + response. Responses are encoded only in UTF-8. An object key can contain any Unicode + character. However, the XML 1.0 parser can't parse certain characters, such as characters + with an ASCII value from 0 to 10. For characters that aren't supported in XML 1.0, you can + add this parameter to request that Amazon S3 encode the keys in the response. For more + information about characters to avoid in object key names, see Object key naming + guidelines. When using the URL encoding type, non-ASCII characters that are used in an + object's key name will be percent-encoded according to UTF-8 code values. For example, the + object test_file(3).png will appear as test_file%283%29.png. - `"fetch-owner"`: The owner field is not present in ListObjectsV2 by default. If you want to return the owner field with each key in the result, then set the FetchOwner field to true. Directory buckets - For directory buckets, the bucket owner is returned as the @@ -5315,15 +5364,18 @@ default encryption for a bucket by using server-side encryption with Key Managem (DSSE-KMS). If you specify default encryption by using SSE-KMS, you can also configure Amazon S3 Bucket Keys. If you use PutBucketEncryption to set your default bucket encryption to SSE-KMS, you should verify that your KMS key ID is correct. Amazon S3 does not validate -the KMS key ID provided in PutBucketEncryption requests. This action requires Amazon Web -Services Signature Version 4. For more information, see Authenticating Requests (Amazon -Web Services Signature Version 4). To use this operation, you must have permission to -perform the s3:PutEncryptionConfiguration action. The bucket owner has this permission by -default. The bucket owner can grant this permission to others. For more information about -permissions, see Permissions Related to Bucket Subresource Operations and Managing Access -Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide. The following -operations are related to PutBucketEncryption: GetBucketEncryption -DeleteBucketEncryption +the KMS key ID provided in PutBucketEncryption requests. If you're specifying a customer +managed KMS key, we recommend using a fully qualified KMS key ARN. If you use a KMS key +alias instead, then KMS resolves the key within the requester’s account. This behavior +can result in data that's encrypted with a KMS key that belongs to the requester, and not +the bucket owner. Also, this action requires Amazon Web Services Signature Version 4. For +more information, see Authenticating Requests (Amazon Web Services Signature Version 4). +To use this operation, you must have permission to perform the +s3:PutEncryptionConfiguration action. The bucket owner has this permission by default. The +bucket owner can grant this permission to others. For more information about permissions, +see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to +Your Amazon S3 Resources in the Amazon S3 User Guide. 
The following operations are related +to PutBucketEncryption: GetBucketEncryption DeleteBucketEncryption # Arguments - `bucket`: Specifies default encryption for a bucket using server-side encryption with @@ -6389,11 +6441,14 @@ end put_bucket_versioning(bucket, versioning_configuration) put_bucket_versioning(bucket, versioning_configuration, params::Dict{String,<:Any}) - This operation is not supported by directory buckets. Sets the versioning state of an -existing bucket. You can set the versioning state with one of the following values: -Enabled—Enables versioning for the objects in the bucket. All objects added to the bucket -receive a unique version ID. Suspended—Disables versioning for the objects in the -bucket. All objects added to the bucket receive the version ID null. If the versioning + This operation is not supported by directory buckets. When you enable versioning on a +bucket for the first time, it might take a short amount of time for the change to be fully +propagated. We recommend that you wait for 15 minutes after enabling versioning before +issuing write operations (PUT or DELETE) on objects in the bucket. Sets the versioning +state of an existing bucket. You can set the versioning state with one of the following +values: Enabled—Enables versioning for the objects in the bucket. All objects added to +the bucket receive a unique version ID. Suspended—Disables versioning for the objects in +the bucket. All objects added to the bucket receive the version ID null. If the versioning state has never been set on a bucket, it has no versioning state; a GetBucketVersioning request does not return a versioning state value. In order to enable MFA Delete, you must be the bucket owner. If you are the bucket owner and want to enable MFA Delete in the @@ -6650,6 +6705,12 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys information, see https://www.rfc-editor.org/rfc/rfc9110.html#name-content-type. - `"Expires"`: The date and time at which the object is no longer cacheable. For more information, see https://www.rfc-editor.org/rfc/rfc7234#section-5.3. +- `"If-None-Match"`: Uploads the object only if the object key name does not already exist + in the bucket specified. Otherwise, Amazon S3 returns a 412 Precondition Failed error. If a + conflicting operation occurs during the upload S3 returns a 409 ConditionalRequestConflict + response. On a 409 failure you should retry the upload. Expects the '*' (asterisk) + character. For more information about conditional requests, see RFC 7232, or Conditional + requests in the Amazon S3 User Guide. - `"x-amz-acl"`: The canned ACL to apply to the object. For more information, see Canned ACL in the Amazon S3 User Guide. When adding a new object, you can use headers to grant ACL-based permissions to individual Amazon Web Services accounts or to predefined groups @@ -7312,18 +7373,20 @@ end restore_object(bucket, key) restore_object(bucket, key, params::Dict{String,<:Any}) - This operation is not supported by directory buckets. Restores an archived copy of an -object back into Amazon S3 This functionality is not supported for Amazon S3 on Outposts. 
-This action performs the following types of requests: restore an archive - Restore an -archived object For more information about the S3 structure in the request body, see the -following: PutObject Managing Access with ACLs in the Amazon S3 User Guide -Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide Permissions -To use this operation, you must have permissions to perform the s3:RestoreObject action. -The bucket owner has this permission by default and can grant this permission to others. -For more information about permissions, see Permissions Related to Bucket Subresource -Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 -User Guide. Restoring objects Objects that you archive to the S3 Glacier Flexible -Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 + This operation is not supported by directory buckets. The SELECT job type for the +RestoreObject operation is no longer available to new customers. Existing customers of +Amazon S3 Select can continue to use the feature as usual. Learn more Restores an +archived copy of an object back into Amazon S3 This functionality is not supported for +Amazon S3 on Outposts. This action performs the following types of requests: restore an +archive - Restore an archived object For more information about the S3 structure in the +request body, see the following: PutObject Managing Access with ACLs in the Amazon +S3 User Guide Protecting Data Using Server-Side Encryption in the Amazon S3 User Guide + Permissions To use this operation, you must have permissions to perform the +s3:RestoreObject action. The bucket owner has this permission by default and can grant this +permission to others. For more information about permissions, see Permissions Related to +Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources +in the Amazon S3 User Guide. Restoring objects Objects that you archive to the S3 Glacier +Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, are not accessible in real time. For objects in the S3 Glacier Flexible Retrieval Flexible Retrieval or S3 Glacier Deep Archive storage classes, you must first initiate a restore @@ -7448,8 +7511,10 @@ end select_object_content(bucket, expression, expression_type, input_serialization, key, output_serialization) select_object_content(bucket, expression, expression_type, input_serialization, key, output_serialization, params::Dict{String,<:Any}) - This operation is not supported by directory buckets. This action filters the contents of -an Amazon S3 object based on a simple structured query language (SQL) statement. In the + This operation is not supported by directory buckets. The SelectObjectContent operation +is no longer available to new customers. Existing customers of Amazon S3 Select can +continue to use the operation as usual. Learn more This action filters the contents of an +Amazon S3 object based on a simple structured query language (SQL) statement. In the request, along with the SQL expression, you must also specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses this format to parse object data into records, and returns only records that match the specified SQL expression. You @@ -7608,8 +7673,16 @@ API operation to the Zonal endpoint. 
These endpoints support virtual-hosted-styl
in the format https://bucket_name.s3express-az_id.region.amazonaws.com/key-name .
Path-style requests are not supported. For more information, see Regional and Zonal
endpoints in the Amazon S3 User Guide. Permissions General purpose bucket permissions
-- For information on the permissions required to use the multipart upload API, see
-Multipart Upload and Permissions in the Amazon S3 User Guide. Directory bucket
+- To perform a multipart upload with encryption using a Key Management Service key, the
+requester must have permission to the kms:Decrypt and kms:GenerateDataKey actions on the
+key. The requester must also have permissions for the kms:GenerateDataKey action for the
+CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action
+on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3
+must decrypt and read data from the encrypted file parts before it completes the multipart
+upload. For more information about KMS permissions, see Protecting data using server-side
+encryption with KMS in the Amazon S3 User Guide. For information about the permissions
+required to use the multipart upload API, see Multipart upload and permissions and
+Multipart upload API and permissions in the Amazon S3 User Guide. Directory bucket
 permissions - To grant access to this API operation on a directory bucket, we recommend
that you use the CreateSession API operation for session-based authorization.
Specifically, you grant the s3express:CreateSession permission to the directory bucket in a
@@ -7802,33 +7875,41 @@ permissions in a policy based on the bucket types of your source bucket and dest
 bucket in an UploadPartCopy operation. If the source object is in a general purpose
bucket, you must have the s3:GetObject permission to read the source object that is being
copied. If the destination bucket is a general purpose bucket, you must have the
-s3:PutObject permission to write the object copy to the destination bucket. For
-information about permissions required to use the multipart upload API, see Multipart
-Upload and Permissions in the Amazon S3 User Guide. Directory bucket permissions - You
-must have permissions in a bucket policy or an IAM identity-based policy based on the
-source and destination bucket types in an UploadPartCopy operation. If the source object
-that you want to copy is in a directory bucket, you must have the s3express:CreateSession
-permission in the Action element of a policy to read the object . By default, the session
-is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the
-s3express:SessionMode condition key to ReadOnly on the copy source bucket. If the copy
-destination is a directory bucket, you must have the s3express:CreateSession permission
-in the Action element of a policy to write the object to the destination. The
-s3express:SessionMode condition key cannot be set to ReadOnly on the copy destination.
-For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web
-Services Identity and Access Management (IAM) identity-based policies for S3 Express One
-Zone in the Amazon S3 User Guide. Encryption General purpose buckets - For
-information about using server-side encryption with customer-provided encryption keys with
-the UploadPartCopy operation, see CopyObject and UploadPart. Directory buckets - For
-directory buckets, only server-side encryption with Amazon S3 managed keys (SSE-S3)
-(AES256) is supported. Special errors Error Code: NoSuchUpload Description: The
-specified multipart upload does not exist. The upload ID might be invalid, or the multipart
-upload might have been aborted or completed. HTTP Status Code: 404 Not Found Error
-Code: InvalidRequest Description: The specified copy source is not supported as a
-byte-range copy source. HTTP Status Code: 400 Bad Request HTTP Host header syntax
-Directory buckets - The HTTP Host header syntax is
-Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are related to
-UploadPartCopy: CreateMultipartUpload UploadPart CompleteMultipartUpload
-AbortMultipartUpload ListParts ListMultipartUploads
+s3:PutObject permission to write the object copy to the destination bucket. To perform
+a multipart upload with encryption using a Key Management Service key, the requester must
+have permission to the kms:Decrypt and kms:GenerateDataKey actions on the key. The
+requester must also have permissions for the kms:GenerateDataKey action for the
+CreateMultipartUpload API. Then, the requester needs permissions for the kms:Decrypt action
+on the UploadPart and UploadPartCopy APIs. These permissions are required because Amazon S3
+must decrypt and read data from the encrypted file parts before it completes the multipart
+upload. For more information about KMS permissions, see Protecting data using server-side
+encryption with KMS in the Amazon S3 User Guide. For information about the permissions
+required to use the multipart upload API, see Multipart upload and permissions and
+Multipart upload API and permissions in the Amazon S3 User Guide. Directory bucket
+permissions - You must have permissions in a bucket policy or an IAM identity-based policy
+based on the source and destination bucket types in an UploadPartCopy operation. If the
+source object that you want to copy is in a directory bucket, you must have the
+s3express:CreateSession permission in the Action element of a policy to read the object.
+By default, the session is in the ReadWrite mode. If you want to restrict the access, you
+can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source
+bucket. If the copy destination is a directory bucket, you must have the
+s3express:CreateSession permission in the Action element of a policy to write the object
+to the destination. The s3express:SessionMode condition key cannot be set to ReadOnly on
+the copy destination. For example policies, see Example bucket policies for S3 Express
+One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based
+policies for S3 Express One Zone in the Amazon S3 User Guide. Encryption General
+purpose buckets - For information about using server-side encryption with
+customer-provided encryption keys with the UploadPartCopy operation, see CopyObject and
+UploadPart. Directory buckets - For directory buckets, only server-side encryption
+with Amazon S3 managed keys (SSE-S3) (AES256) is supported. Special errors Error
+Code: NoSuchUpload Description: The specified multipart upload does not exist. The
+upload ID might be invalid, or the multipart upload might have been aborted or completed.
+HTTP Status Code: 404 Not Found Error Code: InvalidRequest Description: The
+specified copy source is not supported as a byte-range copy source. 
HTTP Status Code: 400 +Bad Request HTTP Host header syntax Directory buckets - The HTTP Host header syntax +is Bucket_name.s3express-az_id.region.amazonaws.com. The following operations are +related to UploadPartCopy: CreateMultipartUpload UploadPart +CompleteMultipartUpload AbortMultipartUpload ListParts ListMultipartUploads # Arguments - `bucket`: The bucket name. Directory buckets - When you use this operation with a diff --git a/src/services/s3_control.jl b/src/services/s3_control.jl index 3085f990c4..04e4c72e3b 100644 --- a/src/services/s3_control.jl +++ b/src/services/s3_control.jl @@ -23,7 +23,7 @@ sso:PutApplicationGrant, and sso:PutApplicationAuthenticationMethod. IAM Identity Center instance is your corporate identity directory that you added to the IAM Identity Center. You can use the ListInstances API operation to retrieve a list of your Identity Center instances and their ARNs. -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. """ function associate_access_grants_identity_center( @@ -93,7 +93,7 @@ identitystore:DescribeUser For directory groups - identitystore:DescribeGroup to one of the following values: READ – Grant read-only access to the S3 data. WRITE – Grant write-only access to the S3 data. READWRITE – Grant both read and write access to the S3 data. -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -172,7 +172,7 @@ instance, you must also have the sso:DescribeInstance, sso:CreateApplication, sso:PutApplicationGrant, and sso:PutApplicationAuthenticationMethod permissions. # Arguments -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -247,7 +247,7 @@ iam:PassRole string of characters at the beginning of an object key name used to organize the objects that you store in your S3 buckets. For example, object key names that start with the engineering/ prefix or object key names that start with the marketing/campaigns/ prefix. -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -739,7 +739,7 @@ must have the s3:DeleteAccessGrant permission to use this operation. # Arguments - `id`: The ID of the access grant. S3 Access Grants auto-generates this ID when you create the access grant. -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. """ function delete_access_grant( @@ -791,7 +791,7 @@ AssociateAccessGrantsIdentityCenter and DissociateAccessGrantsIdentityCenter. P You must have the s3:DeleteAccessGrantsInstance permission to use this operation. # Arguments -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. 
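Given the KMS permission requirements described above for multipart uploads, a minimal, hedged Julia sketch of requesting SSE-KMS through this package's generated S3 wrapper. The bucket, key, and KMS key ARN are hypothetical, the positional signature of create_multipart_upload is assumed from the generated wrappers, and the `headers` entry follows the params convention the REST-style services in this patch use.

using AWS
@service S3

# Start a multipart upload that asks S3 to encrypt parts with a customer managed
# KMS key (hypothetical ARN). Per the permissions above, the caller also needs
# kms:GenerateDataKey and kms:Decrypt on this key.
S3.create_multipart_upload(
    "amzn-s3-demo-bucket",
    "backups/large-object.bin",
    Dict(
        "headers" => Dict(
            "x-amz-server-side-encryption" => "aws:kms",
            "x-amz-server-side-encryption-aws-kms-key-id" =>
                "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID",
        ),
    ),
)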
+- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. """ function delete_access_grants_instance( @@ -840,7 +840,7 @@ Permissions You must have the s3:DeleteAccessGrantsInstanceResourcePolicy permi use this operation. # Arguments -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. """ function delete_access_grants_instance_resource_policy( @@ -894,7 +894,7 @@ this operation. Grants instance. S3 Access Grants assigned this ID when you registered the location. S3 Access Grants assigns the ID default to the default location s3:// and assigns an auto-generated ID to other locations that you register. -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. """ function delete_access_grants_location( @@ -1958,7 +1958,7 @@ permission to use this operation. Additional Permissions You must have the sso:DeleteApplication permission to use this operation. # Arguments -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. """ function dissociate_access_grants_identity_center( @@ -2006,7 +2006,7 @@ must have the s3:GetAccessGrant permission to use this operation. # Arguments - `id`: The ID of the access grant. S3 Access Grants auto-generates this ID when you create the access grant. -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. """ function get_access_grant( @@ -2051,9 +2051,11 @@ end Retrieves the S3 Access Grants instance for a Region in your account. Permissions You must have the s3:GetAccessGrantsInstance permission to use this operation. +GetAccessGrantsInstance is not supported for cross-account access. You can only call the +API from the account that owns the S3 Access Grants instance. # Arguments -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. """ function get_access_grants_instance( @@ -2152,7 +2154,7 @@ Returns the resource policy of the S3 Access Grants instance. Permissions You the s3:GetAccessGrantsInstanceResourcePolicy permission to use this operation. # Arguments -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. """ function get_access_grants_instance_resource_policy( @@ -2203,7 +2205,7 @@ this operation. this ID when you register the location. S3 Access Grants assigns the ID default to the default location s3:// and assigns an auto-generated ID to other locations that you register. -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. """ function get_access_grants_location( @@ -3059,7 +3061,7 @@ sts:SetSourceIdentity. - `target`: The S3 URI path of the data to which you are requesting temporary access credentials. 
If the requesting account has an access grant for this data, S3 Access Grants vends temporary access credentials in the response. -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3607,7 +3609,7 @@ Returns the list of access grants in your S3 Access Grants instance. Permission have the s3:ListAccessGrants permission to use this operation. # Arguments -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3684,7 +3686,7 @@ instance per Region per account. Permissions You must have the s3:ListAccessGrantsInstances permission to use this operation. # Arguments -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3738,7 +3740,7 @@ Returns a list of the locations registered in your S3 Access Grants instance. P You must have the s3:ListAccessGrantsLocations permission to use this operation. # Arguments -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -3926,6 +3928,68 @@ function list_access_points_for_object_lambda( ) end +""" + list_caller_access_grants(x-amz-account-id) + list_caller_access_grants(x-amz-account-id, params::Dict{String,<:Any}) + +Returns a list of the access grants that were given to the caller using S3 Access Grants +and that allow the caller to access the S3 data of the Amazon Web Services account +specified in the request. Permissions You must have the s3:ListCallerAccessGrants +permission to use this operation. + +# Arguments +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"allowedByApplication"`: If this optional parameter is passed in the request, a filter + is applied to the results. The results will include only the access grants for the caller's + Identity Center application or for any other applications (ALL). +- `"grantscope"`: The S3 path of the data that you would like to access. Must start with + s3://. You can optionally pass only the beginning characters of a path, and S3 Access + Grants will search for all applicable grants for the path fragment. +- `"maxResults"`: The maximum number of access grants that you would like returned in the + List Caller Access Grants response. If the results include the pagination token NextToken, + make another call using the NextToken to determine if there are more results. +- `"nextToken"`: A pagination token to request the next page of results. Pass this value + into a subsequent List Caller Access Grants request in order to retrieve the next page of + results. 
+""" +function list_caller_access_grants( + x_amz_account_id; aws_config::AbstractAWSConfig=global_aws_config() +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance/caller/grants", + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_caller_access_grants( + x_amz_account_id, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return s3_control( + "GET", + "/v20180820/accessgrantsinstance/caller/grants", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "headers" => Dict{String,Any}("x-amz-account-id" => x_amz_account_id) + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_jobs(x-amz-account-id) list_jobs(x-amz-account-id, params::Dict{String,<:Any}) @@ -4271,7 +4335,7 @@ the s3:PutAccessGrantsInstanceResourcePolicy permission to use this operation. # Arguments - `policy`: The resource policy of the S3 Access Grants instance that you are updating. -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: @@ -5462,7 +5526,7 @@ iam:PassRole that you register. If you are passing the default location, you cannot create an access grant for the entire default location. You must also specify a bucket or a bucket and prefix in the Subprefix field. -- `x-amz-account-id`: The ID of the Amazon Web Services account that is making this request. +- `x-amz-account-id`: The Amazon Web Services account ID of the S3 Access Grants instance. """ function update_access_grants_location( diff --git a/src/services/sagemaker.jl b/src/services/sagemaker.jl index bf951fe784..c1efe400fb 100644 --- a/src/services/sagemaker.jl +++ b/src/services/sagemaker.jl @@ -506,15 +506,27 @@ end create_auto_mljob(auto_mljob_name, input_data_config, output_data_config, role_arn) create_auto_mljob(auto_mljob_name, input_data_config, output_data_config, role_arn, params::Dict{String,<:Any}) -Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. We -recommend using the new versions CreateAutoMLJobV2 and DescribeAutoMLJobV2, which offer -backward compatibility. CreateAutoMLJobV2 can manage tabular problem types identical to -those of its previous version CreateAutoMLJob, as well as time-series forecasting, -non-tabular problem types such as image or text classification, and text generation (LLMs -fine-tuning). Find guidelines about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 -in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. You can find the best-performing model -after you run an AutoML job by calling DescribeAutoMLJobV2 (recommended) or -DescribeAutoMLJob. +Creates an Autopilot job also referred to as Autopilot experiment or AutoML job. An AutoML +job in SageMaker is a fully automated process that allows you to build machine learning +models with minimal effort and machine learning expertise. When initiating an AutoML job, +you provide your data and optionally specify parameters tailored to your use case. +SageMaker then automates the entire model development lifecycle, including data +preprocessing, model training, tuning, and evaluation. 
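A hedged usage sketch for the new ListCallerAccessGrants wrapper above; the account ID and grant scope are placeholders, and the module name produced by `@service` is assumed.

using AWS
@service S3_Control

# List the grants the caller holds under one hypothetical path prefix; optional
# keys such as grantscope go in the params dict, matching the generated method.
S3_Control.list_caller_access_grants(
    "111122223333",
    Dict("grantscope" => "s3://amzn-s3-demo-bucket/engineering/"),
)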
AutoML jobs are designed to simplify +and accelerate the model building process by automating various tasks and exploring +different combinations of machine learning algorithms, data preprocessing techniques, and +hyperparameter values. The output of an AutoML job comprises one or more trained models +ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a +candidate model leaderboard, allowing you to select the best-performing model for +deployment. For more information about AutoML jobs, see +https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html +in the SageMaker developer guide. We recommend using the new versions CreateAutoMLJobV2 +and DescribeAutoMLJobV2, which offer backward compatibility. CreateAutoMLJobV2 can manage +tabular problem types identical to those of its previous version CreateAutoMLJob, as well +as time-series forecasting, non-tabular problem types such as image or text classification, +and text generation (LLMs fine-tuning). Find guidelines about how to migrate a +CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. +You can find the best-performing model after you run an AutoML job by calling +DescribeAutoMLJobV2 (recommended) or DescribeAutoMLJob. # Arguments - `auto_mljob_name`: Identifies an Autopilot job. The name must be unique to your account @@ -596,16 +608,31 @@ end create_auto_mljob_v2(auto_mljob_input_data_config, auto_mljob_name, auto_mlproblem_type_config, output_data_config, role_arn) create_auto_mljob_v2(auto_mljob_input_data_config, auto_mljob_name, auto_mlproblem_type_config, output_data_config, role_arn, params::Dict{String,<:Any}) -Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. -CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of CreateAutoMLJob and -DescribeAutoMLJob which offer backward compatibility. CreateAutoMLJobV2 can manage tabular -problem types identical to those of its previous version CreateAutoMLJob, as well as -time-series forecasting, non-tabular problem types such as image or text classification, -and text generation (LLMs fine-tuning). Find guidelines about how to migrate a -CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to CreateAutoMLJobV2. -For the list of available problem types supported by CreateAutoMLJobV2, see -AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML job -V2 by calling DescribeAutoMLJobV2. +Creates an Autopilot job also referred to as Autopilot experiment or AutoML job V2. An +AutoML job in SageMaker is a fully automated process that allows you to build machine +learning models with minimal effort and machine learning expertise. When initiating an +AutoML job, you provide your data and optionally specify parameters tailored to your use +case. SageMaker then automates the entire model development lifecycle, including data +preprocessing, model training, tuning, and evaluation. AutoML jobs are designed to simplify +and accelerate the model building process by automating various tasks and exploring +different combinations of machine learning algorithms, data preprocessing techniques, and +hyperparameter values. The output of an AutoML job comprises one or more trained models +ready for deployment and inference. Additionally, SageMaker AutoML jobs generate a +candidate model leaderboard, allowing you to select the best-performing model for +deployment. 
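To make the V2 call shape concrete, a minimal sketch under assumed field names from the AutoML API; every bucket, job name, and ARN below is hypothetical.

using AWS
@service Sagemaker

# The five positional arguments of create_auto_mljob_v2: input channels, job
# name, problem-type config, output location, and execution role.
Sagemaker.create_auto_mljob_v2(
    [Dict(
        "ChannelType" => "training",
        "DataSource" => Dict(
            "S3DataSource" => Dict(
                "S3DataType" => "S3Prefix",
                "S3Uri" => "s3://amzn-s3-demo-bucket/automl/train/",
            ),
        ),
    )],
    "demo-automl-job",
    Dict("TabularJobConfig" => Dict("TargetAttributeName" => "label")),
    Dict("S3OutputPath" => "s3://amzn-s3-demo-bucket/automl/output/"),
    "arn:aws:iam::111122223333:role/SageMakerExecutionRole",
)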
For more information about AutoML jobs, see +https://docs.aws.amazon.com/sagemaker/latest/dg/autopilot-automate-model-development.html +in the SageMaker developer guide. AutoML jobs V2 support various problem types such as +regression, binary, and multiclass classification with tabular data, text and image +classification, time-series forecasting, and fine-tuning of large language models (LLMs) +for text generation. CreateAutoMLJobV2 and DescribeAutoMLJobV2 are new versions of +CreateAutoMLJob and DescribeAutoMLJob which offer backward compatibility. +CreateAutoMLJobV2 can manage tabular problem types identical to those of its previous +version CreateAutoMLJob, as well as time-series forecasting, non-tabular problem types such +as image or text classification, and text generation (LLMs fine-tuning). Find guidelines +about how to migrate a CreateAutoMLJob to CreateAutoMLJobV2 in Migrate a CreateAutoMLJob to +CreateAutoMLJobV2. For the list of available problem types supported by CreateAutoMLJobV2, +see AutoMLProblemTypeConfig. You can find the best-performing model after you run an AutoML +job V2 by calling DescribeAutoMLJobV2. # Arguments - `auto_mljob_input_data_config`: An array of channel objects describing the input data and @@ -625,6 +652,7 @@ V2 by calling DescribeAutoMLJobV2. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"AutoMLComputeConfig"`: Specifies the compute configuration for the AutoML job V2. - `"AutoMLJobObjective"`: Specifies a metric to minimize or maximize as the objective of a job. If not specified, the default objective metric depends on the problem type. For the list of default values per problem type, see AutoMLJobObjective. For tabular problem @@ -3532,6 +3560,101 @@ function create_notebook_instance_lifecycle_config( ) end +""" + create_optimization_job(deployment_instance_type, model_source, optimization_configs, optimization_job_name, output_config, role_arn, stopping_condition) + create_optimization_job(deployment_instance_type, model_source, optimization_configs, optimization_job_name, output_config, role_arn, stopping_condition, params::Dict{String,<:Any}) + +Creates a job that optimizes a model for inference performance. To create the job, you +provide the location of a source model, and you provide the settings for the optimization +techniques that you want the job to apply. When the job completes successfully, SageMaker +uploads the new optimized model to the output destination that you specify. For more +information about how to use this action, and about the supported optimization techniques, +see Optimize model inference with Amazon SageMaker. + +# Arguments +- `deployment_instance_type`: The type of instance that hosts the optimized model that you + create with the optimization job. +- `model_source`: The location of the source model to optimize with an optimization job. +- `optimization_configs`: Settings for each of the optimization techniques that the job + applies. +- `optimization_job_name`: A custom name for the new optimization job. +- `output_config`: Details for where to store the optimized model that you create with the + optimization job. +- `role_arn`: The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker + to perform tasks on your behalf. 
During model optimization, Amazon SageMaker needs your + permission to: Read input data from an S3 bucket Write model artifacts to an S3 bucket + Write logs to Amazon CloudWatch Logs Publish metrics to Amazon CloudWatch You grant + permissions for all of these tasks to an IAM role. To pass this role to Amazon SageMaker, + the caller of this API must have the iam:PassRole permission. For more information, see + Amazon SageMaker Roles. +- `stopping_condition`: + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"OptimizationEnvironment"`: The environment variables to set in the model container. +- `"Tags"`: A list of key-value pairs associated with the optimization job. For more + information, see Tagging Amazon Web Services resources in the Amazon Web Services General + Reference Guide. +- `"VpcConfig"`: A VPC in Amazon VPC that your optimized model has access to. +""" +function create_optimization_job( + DeploymentInstanceType, + ModelSource, + OptimizationConfigs, + OptimizationJobName, + OutputConfig, + RoleArn, + StoppingCondition; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateOptimizationJob", + Dict{String,Any}( + "DeploymentInstanceType" => DeploymentInstanceType, + "ModelSource" => ModelSource, + "OptimizationConfigs" => OptimizationConfigs, + "OptimizationJobName" => OptimizationJobName, + "OutputConfig" => OutputConfig, + "RoleArn" => RoleArn, + "StoppingCondition" => StoppingCondition, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_optimization_job( + DeploymentInstanceType, + ModelSource, + OptimizationConfigs, + OptimizationJobName, + OutputConfig, + RoleArn, + StoppingCondition, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "CreateOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "DeploymentInstanceType" => DeploymentInstanceType, + "ModelSource" => ModelSource, + "OptimizationConfigs" => OptimizationConfigs, + "OptimizationJobName" => OptimizationJobName, + "OutputConfig" => OutputConfig, + "RoleArn" => RoleArn, + "StoppingCondition" => StoppingCondition, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ create_pipeline(client_request_token, pipeline_name, role_arn) create_pipeline(client_request_token, pipeline_name, role_arn, params::Dict{String,<:Any}) @@ -4248,8 +4371,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys not needed for inference in a batch transform job. The output filter provided allows you to include input data relevant to interpreting the predictions in the output from the job. For more information, see Associate Prediction Results with their Corresponding Input Records. -- `"Environment"`: The environment variables to set in the Docker container. We support up - to 16 key and values entries in the map. +- `"Environment"`: The environment variables to set in the Docker container. Don't include + any sensitive data in your environment variables. We support up to 16 key and values + entries in the map. - `"ExperimentConfig"`: - `"MaxConcurrentTransforms"`: The maximum number of parallel requests that can be sent to each instance in a transform job. If MaxConcurrentTransforms is set to 0 or left unset, @@ -5503,7 +5627,8 @@ Delete a hub content reference in order to remove a model from a private hub. 
# Arguments - `hub_content_name`: The name of the hub content to delete. -- `hub_content_type`: The type of hub content to delete. +- `hub_content_type`: The type of hub content reference to delete. The only supported type + of hub content reference to delete is ModelReference. - `hub_name`: The name of the hub to delete the hub content reference from. """ @@ -6240,6 +6365,45 @@ function delete_notebook_instance_lifecycle_config( ) end +""" + delete_optimization_job(optimization_job_name) + delete_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Deletes an optimization job. + +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. + +""" +function delete_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DeleteOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DeleteOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_pipeline(client_request_token, pipeline_name) delete_pipeline(client_request_token, pipeline_name, params::Dict{String,<:Any}) @@ -8493,6 +8657,45 @@ function describe_notebook_instance_lifecycle_config( ) end +""" + describe_optimization_job(optimization_job_name) + describe_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Provides the properties of the specified optimization job. + +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. + +""" +function describe_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "DescribeOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "DescribeOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ describe_pipeline(pipeline_name) describe_pipeline(pipeline_name, params::Dict{String,<:Any}) @@ -11782,6 +11985,53 @@ function list_notebook_instances( ) end +""" + list_optimization_jobs() + list_optimization_jobs(params::Dict{String,<:Any}) + +Lists the optimization jobs in your account and their properties. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"CreationTimeAfter"`: Filters the results to only those optimization jobs that were + created after the specified time. +- `"CreationTimeBefore"`: Filters the results to only those optimization jobs that were + created before the specified time. +- `"LastModifiedTimeAfter"`: Filters the results to only those optimization jobs that were + updated after the specified time. +- `"LastModifiedTimeBefore"`: Filters the results to only those optimization jobs that were + updated before the specified time. 
+- `"MaxResults"`: The maximum number of optimization jobs to return in the response. The + default is 50. +- `"NameContains"`: Filters the results to only those optimization jobs with a name that + contains the specified string. +- `"NextToken"`: A token that you use to get the next set of results following a truncated + response. If the response to the previous request was truncated, that response provides the + value for this token. +- `"OptimizationContains"`: Filters the results to only those optimization jobs that apply + the specified optimization techniques. You can specify either Quantization or Compilation. +- `"SortBy"`: The field by which to sort the optimization jobs in the response. The default + is CreationTime +- `"SortOrder"`: The sort order for results. The default is Ascending +- `"StatusEquals"`: Filters the results to only those optimization jobs with the specified + status. +""" +function list_optimization_jobs(; aws_config::AbstractAWSConfig=global_aws_config()) + return sagemaker( + "ListOptimizationJobs"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function list_optimization_jobs( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "ListOptimizationJobs", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ list_pipeline_execution_steps() list_pipeline_execution_steps(params::Dict{String,<:Any}) @@ -13663,6 +13913,45 @@ function stop_notebook_instance( ) end +""" + stop_optimization_job(optimization_job_name) + stop_optimization_job(optimization_job_name, params::Dict{String,<:Any}) + +Ends a running inference optimization job. + +# Arguments +- `optimization_job_name`: The name that you assigned to the optimization job. + +""" +function stop_optimization_job( + OptimizationJobName; aws_config::AbstractAWSConfig=global_aws_config() +) + return sagemaker( + "StopOptimizationJob", + Dict{String,Any}("OptimizationJobName" => OptimizationJobName); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_optimization_job( + OptimizationJobName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return sagemaker( + "StopOptimizationJob", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}("OptimizationJobName" => OptimizationJobName), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_pipeline_execution(client_request_token, pipeline_execution_arn) stop_pipeline_execution(client_request_token, pipeline_execution_arn, params::Dict{String,<:Any}) diff --git a/src/services/secrets_manager.jl b/src/services/secrets_manager.jl index 90c4910c41..3cf851578a 100644 --- a/src/services/secrets_manager.jl +++ b/src/services/secrets_manager.jl @@ -133,7 +133,10 @@ secretsmanager:TagResource. To add replica Regions, you must also have secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and -kms:Decrypt permission to the key. +kms:Decrypt permission to the key. When you enter commands in a command shell, there is a +risk of the command history being accessed or utilities having access to your command +parameters. This is a concern if the command includes the value of a secret. 
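One way to honor the shell-history caution above is to set the secret value from code rather than a command line; a minimal sketch with hypothetical names, reading the value from the process environment.

using AWS
@service Secrets_Manager

# The value never appears in shell history or process arguments; it is read
# from an environment variable (both names are hypothetical).
Secrets_Manager.create_secret(
    "prod/demo/api-key",
    Dict("SecretString" => ENV["DEMO_API_KEY"]),
)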
Learn how to +Mitigate the risks of using command-line tools to store Secrets Manager secrets. # Arguments - `name`: The name of the new secret. The secret name can contain ASCII letters, numbers, @@ -725,7 +728,11 @@ log entry when you call this action. Do not include sensitive information in req parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions -for Secrets Manager and Authentication and access control in Secrets Manager. +for Secrets Manager and Authentication and access control in Secrets Manager. When you +enter commands in a command shell, there is a risk of the command history being accessed or +utilities having access to your command parameters. This is a concern if the command +includes the value of a secret. Learn how to Mitigate the risks of using command-line tools +to store Secrets Manager secrets. # Arguments - `secret_id`: The ARN or name of the secret to add a new version to. For an ARN, we @@ -1215,8 +1222,12 @@ secretsmanager:UpdateSecret. For more information, see IAM policy actions for S Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey, kms:Encrypt, and kms:Decrypt permissions on the key. If you change the KMS key and you don't have kms:Encrypt permission -to the new key, Secrets Manager does not re-ecrypt existing secret versions with the new -key. For more information, see Secret encryption and decryption. +to the new key, Secrets Manager does not re-encrypt existing secret versions with the new +key. For more information, see Secret encryption and decryption. When you enter commands +in a command shell, there is a risk of the command history being accessed or utilities +having access to your command parameters. This is a concern if the command includes the +value of a secret. Learn how to Mitigate the risks of using command-line tools to store +Secrets Manager secrets. # Arguments - `secret_id`: The ARN or name of the secret. For an ARN, we recommend that you specify a @@ -1239,13 +1250,13 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"KmsKeyId"`: The ARN, key ID, or alias of the KMS key that Secrets Manager uses to encrypt new secret versions as well as any existing versions with the staging labels AWSCURRENT, AWSPENDING, or AWSPREVIOUS. If you don't have kms:Encrypt permission to the new - key, Secrets Manager does not re-ecrypt existing secret versions with the new key. For more - information about versions and staging labels, see Concepts: Version. A key alias is always - prefixed by alias/, for example alias/aws/secretsmanager. For more information, see About - aliases. If you set this to an empty string, Secrets Manager uses the Amazon Web Services - managed key aws/secretsmanager. If this key doesn't already exist in your account, then - Secrets Manager creates it for you automatically. All users and roles in the Amazon Web - Services account automatically have access to use aws/secretsmanager. Creating + key, Secrets Manager does not re-encrypt existing secret versions with the new key. For + more information about versions and staging labels, see Concepts: Version. A key alias is + always prefixed by alias/, for example alias/aws/secretsmanager. For more information, see + About aliases. 
If you set this to an empty string, Secrets Manager uses the Amazon Web + Services managed key aws/secretsmanager. If this key doesn't already exist in your account, + then Secrets Manager creates it for you automatically. All users and roles in the Amazon + Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result. You can only use the Amazon Web Services managed key aws/secretsmanager if you call this operation using credentials from the same Amazon Web Services account that owns the secret. diff --git a/src/services/securityhub.jl b/src/services/securityhub.jl index 8ea1cbcda9..f860689fee 100644 --- a/src/services/securityhub.jl +++ b/src/services/securityhub.jl @@ -851,22 +851,24 @@ Hub User Guide. Regions in the current partition. Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. The selected option also determines how to use the Regions provided in the Regions list. The options are - as follows: ALL_REGIONS - Indicates to aggregate findings from all of the Regions where - Security Hub is enabled. When you choose this option, Security Hub also automatically - aggregates findings from new Regions as Security Hub supports them and you opt into them. - ALL_REGIONS_EXCEPT_SPECIFIED - Indicates to aggregate findings from all of the Regions - where Security Hub is enabled, except for the Regions listed in the Regions parameter. When - you choose this option, Security Hub also automatically aggregates findings from new - Regions as Security Hub supports them and you opt into them. SPECIFIED_REGIONS - - Indicates to aggregate findings only from the Regions listed in the Regions parameter. - Security Hub does not automatically aggregate findings from new Regions. + as follows: ALL_REGIONS - Aggregates findings from all of the Regions where Security Hub + is enabled. When you choose this option, Security Hub also automatically aggregates + findings from new Regions as Security Hub supports them and you opt into them. + ALL_REGIONS_EXCEPT_SPECIFIED - Aggregates findings from all of the Regions where Security + Hub is enabled, except for the Regions listed in the Regions parameter. When you choose + this option, Security Hub also automatically aggregates findings from new Regions as + Security Hub supports them and you opt into them. SPECIFIED_REGIONS - Aggregates + findings only from the Regions listed in the Regions parameter. Security Hub does not + automatically aggregate findings from new Regions. NO_REGIONS - Aggregates no data + because no Regions are selected as linked Regions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Regions"`: If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions - that do aggregate findings to the aggregation Region. + that do aggregate findings to the aggregation Region. An InvalidInputException error + results if you populate this field while RegionLinkingMode is NO_REGIONS. """ function create_finding_aggregator( RegionLinkingMode; aws_config::AbstractAWSConfig=global_aws_config() @@ -3037,22 +3039,24 @@ Region. Regions in the current partition. 
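The linking modes above determine which Regions feed findings into the aggregation Region; a hedged sketch with illustrative Region names (SPECIFIED_REGIONS links only the listed Regions, while the new NO_REGIONS mode links none). The list serialization for the Regions key is an assumption.

using AWS
@service Securityhub

# Aggregate findings from two explicitly linked Regions into the calling
# (aggregation) Region.
Securityhub.create_finding_aggregator(
    "SPECIFIED_REGIONS",
    Dict("Regions" => ["us-west-2", "eu-west-1"]),
)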
Also determines whether to automatically aggregate findings from new Regions as Security Hub supports them and you opt into them. The selected option also determines how to use the Regions provided in the Regions list. The options are - as follows: ALL_REGIONS - Indicates to aggregate findings from all of the Regions where - Security Hub is enabled. When you choose this option, Security Hub also automatically - aggregates findings from new Regions as Security Hub supports them and you opt into them. - ALL_REGIONS_EXCEPT_SPECIFIED - Indicates to aggregate findings from all of the Regions - where Security Hub is enabled, except for the Regions listed in the Regions parameter. When - you choose this option, Security Hub also automatically aggregates findings from new - Regions as Security Hub supports them and you opt into them. SPECIFIED_REGIONS - - Indicates to aggregate findings only from the Regions listed in the Regions parameter. - Security Hub does not automatically aggregate findings from new Regions. + as follows: ALL_REGIONS - Aggregates findings from all of the Regions where Security Hub + is enabled. When you choose this option, Security Hub also automatically aggregates + findings from new Regions as Security Hub supports them and you opt into them. + ALL_REGIONS_EXCEPT_SPECIFIED - Aggregates findings from all of the Regions where Security + Hub is enabled, except for the Regions listed in the Regions parameter. When you choose + this option, Security Hub also automatically aggregates findings from new Regions as + Security Hub supports them and you opt into them. SPECIFIED_REGIONS - Aggregates + findings only from the Regions listed in the Regions parameter. Security Hub does not + automatically aggregate findings from new Regions. NO_REGIONS - Aggregates no data + because no Regions are selected as linked Regions. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"Regions"`: If RegionLinkingMode is ALL_REGIONS_EXCEPT_SPECIFIED, then this is a space-separated list of Regions that do not aggregate findings to the aggregation Region. If RegionLinkingMode is SPECIFIED_REGIONS, then this is a space-separated list of Regions - that do aggregate findings to the aggregation Region. + that do aggregate findings to the aggregation Region. An InvalidInputException error + results if you populate this field while RegionLinkingMode is NO_REGIONS. """ function update_finding_aggregator( FindingAggregatorArn, @@ -3099,11 +3103,12 @@ end update_findings(filters, params::Dict{String,<:Any}) UpdateFindings is a deprecated operation. Instead of UpdateFindings, use the -BatchUpdateFindings operation. Updates the Note and RecordState of the Security -Hub-aggregated findings that the filter attributes specify. Any member account that can -view the finding also sees the update to the finding. Finding updates made with -UpdateFindings might not be persisted if the same finding is later updated by the finding -provider through the BatchImportFindings operation. +BatchUpdateFindings operation. The UpdateFindings operation updates the Note and +RecordState of the Security Hub aggregated findings that the filter attributes specify. Any +member account that can view the finding can also see the update to the finding. Finding +updates made with UpdateFindings aren't persisted if the same finding is later updated by +the finding provider through the BatchImportFindings operation. 
In addition, Security Hub +doesn't record updates made with UpdateFindings in the finding history. # Arguments - `filters`: A collection of attributes that specify which findings you want to update. diff --git a/src/services/ses.jl b/src/services/ses.jl index 597039f3df..dc0fb1389e 100644 --- a/src/services/ses.jl +++ b/src/services/ses.jl @@ -1863,8 +1863,8 @@ function send_bounce( end """ - send_bulk_templated_email(destinations, source, template) - send_bulk_templated_email(destinations, source, template, params::Dict{String,<:Any}) + send_bulk_templated_email(default_template_data, destinations, source, template) + send_bulk_templated_email(default_template_data, destinations, source, template, params::Dict{String,<:Any}) Composes an email message to multiple destinations. The message body is created using an email template. To send email using this operation, your call must meet the following @@ -1885,6 +1885,11 @@ send the message to each group. The number of destinations you can contact in call can be limited by your account's maximum sending rate. # Arguments +- `default_template_data`: A list of replacement values to apply to the template when + replacement data is not specified in a Destination object. These values act as a default or + fallback option when no other data is available. The template data is a JSON object, + typically consisting of key-value pairs in which the keys correspond to replacement tags in + the email template. - `destinations`: One or more Destination objects. All of the recipients in a Destination receive the same version of the email. You can specify up to 50 Destination objects within a Destinations array. @@ -1910,11 +1915,6 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys using SendBulkTemplatedEmail. - `"DefaultTags"`: A list of tags, in the form of name/value pairs, to apply to an email that you send to a destination using SendBulkTemplatedEmail. -- `"DefaultTemplateData"`: A list of replacement values to apply to the template when - replacement data is not specified in a Destination object. These values act as a default or - fallback option when no other data is available. The template data is a JSON object, - typically consisting of key-value pairs in which the keys correspond to replacement tags in - the email template. - `"ReplyToAddresses"`: The reply-to email address(es) for the message. If the recipient replies to the message, each reply-to address receives the reply. - `"ReturnPath"`: The email address that bounces and complaints are forwarded to when @@ -1942,18 +1942,26 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"TemplateArn"`: The ARN of the template to use when sending this email. 
""" function send_bulk_templated_email( - Destinations, Source, Template; aws_config::AbstractAWSConfig=global_aws_config() + DefaultTemplateData, + Destinations, + Source, + Template; + aws_config::AbstractAWSConfig=global_aws_config(), ) return ses( "SendBulkTemplatedEmail", Dict{String,Any}( - "Destinations" => Destinations, "Source" => Source, "Template" => Template + "DefaultTemplateData" => DefaultTemplateData, + "Destinations" => Destinations, + "Source" => Source, + "Template" => Template, ); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function send_bulk_templated_email( + DefaultTemplateData, Destinations, Source, Template, @@ -1966,6 +1974,7 @@ function send_bulk_templated_email( mergewith( _merge, Dict{String,Any}( + "DefaultTemplateData" => DefaultTemplateData, "Destinations" => Destinations, "Source" => Source, "Template" => Template, diff --git a/src/services/sesv2.jl b/src/services/sesv2.jl index 8ca722ef37..a2559dbffa 100644 --- a/src/services/sesv2.jl +++ b/src/services/sesv2.jl @@ -2435,14 +2435,13 @@ function put_account_dedicated_ip_warmup_attributes( end """ - put_account_details(mail_type, use_case_description, website_url) - put_account_details(mail_type, use_case_description, website_url, params::Dict{String,<:Any}) + put_account_details(mail_type, website_url) + put_account_details(mail_type, website_url, params::Dict{String,<:Any}) Update your Amazon SES account details. # Arguments - `mail_type`: The type of email your account will send. -- `use_case_description`: A description of the types of email that you plan to send. - `website_url`: The URL of your website. This information helps us better understand the type of content that you plan to send. @@ -2457,28 +2456,21 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys identities. If the value is true, then your account has production access. When your account has production access, you can send email to any address. The sending quota and maximum sending rate for your account vary based on your specific use case. +- `"UseCaseDescription"`: A description of the types of email that you plan to send. """ function put_account_details( - MailType, - UseCaseDescription, - WebsiteURL; - aws_config::AbstractAWSConfig=global_aws_config(), + MailType, WebsiteURL; aws_config::AbstractAWSConfig=global_aws_config() ) return sesv2( "POST", "/v2/email/account/details", - Dict{String,Any}( - "MailType" => MailType, - "UseCaseDescription" => UseCaseDescription, - "WebsiteURL" => WebsiteURL, - ); + Dict{String,Any}("MailType" => MailType, "WebsiteURL" => WebsiteURL); aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) end function put_account_details( MailType, - UseCaseDescription, WebsiteURL, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config(), @@ -2489,11 +2481,7 @@ function put_account_details( Dict{String,Any}( mergewith( _merge, - Dict{String,Any}( - "MailType" => MailType, - "UseCaseDescription" => UseCaseDescription, - "WebsiteURL" => WebsiteURL, - ), + Dict{String,Any}("MailType" => MailType, "WebsiteURL" => WebsiteURL), params, ), ); diff --git a/src/services/sfn.jl b/src/services/sfn.jl index dbd8f7bdbf..57e91f6afd 100644 --- a/src/services/sfn.jl +++ b/src/services/sfn.jl @@ -30,6 +30,7 @@ case, tags will not be updated, even if they are different. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"encryptionConfiguration"`: Settings to configure server-side encryption. 
- `"tags"`: The list of tags to add to a resource. An array of key-value pairs. For more
information, see Using Cost Allocation Tags in the Amazon Web Services Billing and Cost
Management User Guide, and Controlling Access Using IAM Tags. Tags may only contain Unicode
@@ -63,15 +64,19 @@ work (Task states), determine to which states to transition next (Choice states)
execution with an error (Fail states), and so on. State machines are specified using a
JSON-based, structured language. For more information, see Amazon States Language in the
Step Functions User Guide. If you set the publish parameter of this API action to true, it
-publishes version 1 as the first revision of the state machine. This operation is
-eventually consistent. The results are best effort and may not reflect very recent updates
-and changes. CreateStateMachine is an idempotent API. Subsequent requests won’t create
-a duplicate resource if it was already created. CreateStateMachine's idempotency check is
-based on the state machine name, definition, type, LoggingConfiguration, and
-TracingConfiguration. The check is also based on the publish and versionDescription
-parameters. If a following request has a different roleArn or tags, Step Functions will
-ignore these differences and treat it as an idempotent request of the previous. In this
-case, roleArn and tags will not be updated, even if they are different.
+publishes version 1 as the first revision of the state machine. For additional control
+over security, you can encrypt your data using a customer-managed key for Step Functions
+state machines. You can configure a symmetric KMS key and data key reuse period when
+creating or updating a State Machine. The execution history and state machine definition
+will be encrypted with the key applied to the State Machine. This operation is eventually
+consistent. The results are best effort and may not reflect very recent updates and
+changes. CreateStateMachine is an idempotent API. Subsequent requests won’t create a
+duplicate resource if it was already created. CreateStateMachine's idempotency check is
+based on the state machine name, definition, type, LoggingConfiguration,
+TracingConfiguration, and EncryptionConfiguration. The check is also based on the publish
+and versionDescription parameters. If a following request has a different roleArn or tags,
+Step Functions will ignore these differences and treat it as an idempotent request of the
+previous. In this case, roleArn and tags will not be updated, even if they are different.

# Arguments
- `definition`: The Amazon States Language definition of the state machine. See Amazon
States Language.
@@ -84,6 +89,7 @@ case, roleArn and tags will not be updated, even if they are different.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"encryptionConfiguration"`: Settings to configure server-side encryption.
- `"loggingConfiguration"`: Defines what execution history events are logged and where they
are logged. By default, the level is set to OFF. For more information see Log Levels in the
Step Functions User Guide.
@@ -425,6 +431,12 @@ supported by DescribeExecution unless a Map Run dispatched them.

# Arguments
- `execution_arn`: The Amazon Resource Name (ARN) of the execution to describe.

+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`.
Valid keys are: +- `"includedData"`: If your state machine definition is encrypted with a KMS key, callers + must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call + DescribeStateMachine API with includedData = METADATA_ONLY to get a successful response + without the encrypted definition. """ function describe_execution(executionArn; aws_config::AbstractAWSConfig=global_aws_config()) return sfn( @@ -514,6 +526,15 @@ changes. about that version. The version ARN is a combination of state machine ARN and the version number separated by a colon (:). For example, stateMachineARN:1. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includedData"`: If your state machine definition is encrypted with a KMS key, callers + must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the + API with includedData = METADATA_ONLY to get a successful response without the encrypted + definition. When calling a labelled ARN for an encrypted state machine, the includedData + = METADATA_ONLY parameter will not apply because Step Functions needs to decrypt the entire + state machine definition to get the Distributed Map state’s definition. In this case, the + API caller needs to have kms:Decrypt permission. """ function describe_state_machine( stateMachineArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -598,6 +619,12 @@ supported by EXPRESS state machines. - `execution_arn`: The Amazon Resource Name (ARN) of the execution you want state machine information for. +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includedData"`: If your state machine definition is encrypted with a KMS key, callers + must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the + API with includedData = METADATA_ONLY to get a successful response without the encrypted + definition. """ function describe_state_machine_for_execution( executionArn; aws_config::AbstractAWSConfig=global_aws_config() @@ -1180,7 +1207,11 @@ end send_task_failure(task_token, params::Dict{String,<:Any}) Used by activity workers, Task states using the callback pattern, and optionally Task -states using the job run pattern to report that the task identified by the taskToken failed. +states using the job run pattern to report that the task identified by the taskToken +failed. For an execution with encryption enabled, Step Functions will encrypt the error and +cause fields using the KMS key for the execution role. A caller can mark a task as fail +without using any KMS permissions in the execution role if the caller provides a null value +for both error and cause fields because no data needs to be encrypted. # Arguments - `task_token`: The token that represents this task. Task tokens are generated by Step @@ -1412,6 +1443,10 @@ configuration. This API action isn't logged in CloudTrail. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"includedData"`: If your state machine definition is encrypted with a KMS key, callers + must have kms:Decrypt permission to decrypt the definition. Alternatively, you can call the + API with includedData = METADATA_ONLY to get a successful response without the encrypted + definition. 
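A brief sketch of the includedData behavior just described, with a hypothetical state machine ARN.

using AWS
@service Sfn

# Request metadata only, so the call succeeds even without kms:Decrypt on the
# state machine's customer managed key.
Sfn.describe_state_machine(
    "arn:aws:states:us-east-1:111122223333:stateMachine:demo",
    Dict("includedData" => "METADATA_ONLY"),
)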
- `"input"`: The string that contains the JSON input data for the execution, for example: \"input\": \"{\"first_name\" : \"test\"}\" If you don't include any JSON input data, you still must include the two braces, for example: \"input\": \"{}\" Length constraints @@ -1451,7 +1486,11 @@ end stop_execution(execution_arn) stop_execution(execution_arn, params::Dict{String,<:Any}) -Stops an execution. This API action is not supported by EXPRESS state machines. +Stops an execution. This API action is not supported by EXPRESS state machines. For an +execution with encryption enabled, Step Functions will encrypt the error and cause fields +using the KMS key for the execution role. A caller can stop an execution without using any +KMS permissions in the execution role if the caller provides a null value for both error +and cause fields because no data needs to be encrypted. # Arguments - `execution_arn`: The Amazon Resource Name (ARN) of the execution to stop. @@ -1686,11 +1725,12 @@ end update_state_machine(state_machine_arn) update_state_machine(state_machine_arn, params::Dict{String,<:Any}) -Updates an existing state machine by modifying its definition, roleArn, or -loggingConfiguration. Running executions will continue to use the previous definition and -roleArn. You must include at least one of definition or roleArn or you will receive a -MissingRequiredParameter error. A qualified state machine ARN refers to a Distributed Map -state defined within a state machine. For example, the qualified state machine ARN +Updates an existing state machine by modifying its definition, roleArn, +loggingConfiguration, or EncryptionConfiguration. Running executions will continue to use +the previous definition and roleArn. You must include at least one of definition or roleArn +or you will receive a MissingRequiredParameter error. A qualified state machine ARN refers +to a Distributed Map state defined within a state machine. For example, the qualified state +machine ARN arn:partition:states:region:account-id:stateMachine:stateMachineName/mapStateLabel refers to a Distributed Map state with a label mapStateLabel in the state machine named stateMachineName. A qualified state machine ARN can either refer to a Distributed Map state @@ -1721,6 +1761,7 @@ call UpdateStateMachine may use the previous state machine definition and roleAr Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"definition"`: The Amazon States Language definition of the state machine. See Amazon States Language. +- `"encryptionConfiguration"`: Settings to configure server-side encryption. - `"loggingConfiguration"`: Use the LoggingConfiguration data type to set CloudWatch Logs options. - `"publish"`: Specifies whether the state machine version is published. The default is @@ -1833,6 +1874,13 @@ elements, rather than raise an exception. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"maxResults"`: The maximum number of diagnostics that are returned per call. The default + and maximum value is 100. Setting the value to 0 will also use the default of 100. If the + number of diagnostics returned in the response exceeds maxResults, the value of the + truncated field in the response will be set to true. +- `"severity"`: Minimum level of diagnostics to return. ERROR returns only ERROR + diagnostics, whereas WARNING returns both WARNING and ERROR diagnostics. The default is + ERROR. - `"type"`: The target type of state machine for this definition. 
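And a hedged sketch of enabling a customer managed key at creation time; the encryptionConfiguration field names follow the Step Functions EncryptionConfiguration structure, and the definition, role, and key ARNs are placeholders.

using AWS
@service Sfn

# A trivial definition encrypted with a customer managed KMS key; data keys are
# reused for up to 300 seconds to reduce KMS calls.
Sfn.create_state_machine(
    raw"""{"StartAt": "Done", "States": {"Done": {"Type": "Succeed"}}}""",
    "demo-machine",
    "arn:aws:iam::111122223333:role/StepFunctionsRole",
    Dict(
        "encryptionConfiguration" => Dict(
            "type" => "CUSTOMER_MANAGED_KMS_KEY",
            "kmsKeyId" => "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID",
            "kmsDataKeyReusePeriodSeconds" => 300,
        ),
    ),
)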
The default is STANDARD.
"""
function validate_state_machine_definition(
diff --git a/src/services/ssm.jl b/src/services/ssm.jl
index 181c0bb64c..82dc9a5aca 100644
--- a/src/services/ssm.jl
+++ b/src/services/ssm.jl
@@ -838,14 +838,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
About package name formats for approved and rejected patch lists in the Amazon Web
Services Systems Manager User Guide.
- `"RejectedPatchesAction"`: The action for Patch Manager to take on patches included in
- the RejectedPackages list. ALLOW_AS_DEPENDENCY : A package in the Rejected patches list
- is installed only if it is a dependency of another package. It is considered compliant with
- the patch baseline, and its status is reported as InstalledOther. This is the default
- action if no option is specified. BLOCK: Packages in the Rejected patches list, and
- packages that include them as dependencies, aren't installed by Patch Manager under any
- circumstances. If a package was installed before it was added to the Rejected patches list,
- or is installed outside of Patch Manager afterward, it's considered noncompliant with the
- patch baseline and its status is reported as InstalledRejected.
+ the RejectedPackages list. ALLOW_AS_DEPENDENCY Linux and macOS: A package in the
+ rejected patches list is installed only if it is a dependency of another package. It is
+ considered compliant with the patch baseline, and its status is reported as
+ INSTALLED_OTHER. This is the default action if no option is specified. Windows Server:
+ Windows Server doesn't support the concept of package dependencies. If a package in the
+ rejected patches list is already installed on the node, its status is reported as
+ INSTALLED_OTHER. Any package not already installed on the node is skipped. This is the
+ default action if no option is specified. BLOCK All OSs: Packages in the rejected
+ patches list, and packages that include them as dependencies, aren't installed by Patch
+ Manager under any circumstances. If a package was installed before it was added to the
+ rejected patches list, or is installed outside of Patch Manager afterward, it's considered
+ noncompliant with the patch baseline and its status is reported as INSTALLED_REJECTED.
- `"Sources"`: Information about the patches to use to update the managed nodes, including
target operating systems and source repositories. Applies to Linux managed nodes only.
- `"Tags"`: Optional metadata that you assign to a resource. Tags enable you to categorize
@@ -2099,9 +2103,9 @@ not return information for nodes that are either Stopped or Terminated. If you s
or more node IDs, the operation returns information for those managed nodes. If you don't
specify node IDs, it returns information for all your managed nodes. If you specify a node
ID that isn't valid or a node that you don't own, you receive an error. The IamRole field
-returned for this API operation is the Identity and Access Management (IAM) role assigned
-to on-premises managed nodes. This operation does not return the IAM role for EC2
-instances.
+returned for this API operation is the role assigned to an Amazon EC2 instance configured
+with a Systems Manager Quick Setup host management configuration or the role assigned to an
+on-premises managed node.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
@@ -2884,13 +2888,13 @@ CreatePatchBaseline, UpdatePatchBaseline, DescribeAvailablePatches, and
DescribePatchBaselines.
The following section lists the properties that can be used in filters for each major operating system type: AMAZON_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY AMAZON_LINUX_2 Valid properties: PRODUCT | CLASSIFICATION | -SEVERITY CENTOS Valid properties: PRODUCT | CLASSIFICATION | SEVERITY DEBIAN Valid -properties: PRODUCT | PRIORITY MACOS Valid properties: PRODUCT | CLASSIFICATION -ORACLE_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY -REDHAT_ENTERPRISE_LINUX Valid properties: PRODUCT | CLASSIFICATION | SEVERITY SUSE -Valid properties: PRODUCT | CLASSIFICATION | SEVERITY UBUNTU Valid properties: PRODUCT | -PRIORITY WINDOWS Valid properties: PRODUCT | PRODUCT_FAMILY | CLASSIFICATION | -MSRC_SEVERITY +SEVERITY AMAZON_LINUX_2023 Valid properties: PRODUCT | CLASSIFICATION | SEVERITY +CENTOS Valid properties: PRODUCT | CLASSIFICATION | SEVERITY DEBIAN Valid properties: +PRODUCT | PRIORITY MACOS Valid properties: PRODUCT | CLASSIFICATION ORACLE_LINUX +Valid properties: PRODUCT | CLASSIFICATION | SEVERITY REDHAT_ENTERPRISE_LINUX Valid +properties: PRODUCT | CLASSIFICATION | SEVERITY SUSE Valid properties: PRODUCT | +CLASSIFICATION | SEVERITY UBUNTU Valid properties: PRODUCT | PRIORITY WINDOWS Valid +properties: PRODUCT | PRODUCT_FAMILY | CLASSIFICATION | MSRC_SEVERITY # Arguments - `operating_system`: The operating system type for which to list patches. @@ -3117,10 +3121,15 @@ end get_command_invocation(command_id, instance_id) get_command_invocation(command_id, instance_id, params::Dict{String,<:Any}) -Returns detailed information about command execution for an invocation or plugin. -GetCommandInvocation only gives the execution status of a plugin in a document. To get the -command execution status on a specific managed node, use ListCommandInvocations. To get the -command execution status across managed nodes, use ListCommands. +Returns detailed information about command execution for an invocation or plugin. The Run +Command API follows an eventual consistency model, due to the distributed nature of the +system supporting the API. This means that the result of an API command you run that +affects your resources might not be immediately visible to all subsequent commands you run. +You should keep this in mind when you carry out an API command that immediately follows a +previous API command. GetCommandInvocation only gives the execution status of a plugin in +a document. To get the command execution status on a specific managed node, use +ListCommandInvocations. To get the command execution status across managed nodes, use +ListCommands. # Arguments - `command_id`: (Required) The parent command ID of the invocation plugin. @@ -6848,14 +6857,18 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys About package name formats for approved and rejected patch lists in the Amazon Web Services Systems Manager User Guide. - `"RejectedPatchesAction"`: The action for Patch Manager to take on patches included in - the RejectedPackages list. ALLOW_AS_DEPENDENCY : A package in the Rejected patches list - is installed only if it is a dependency of another package. It is considered compliant with - the patch baseline, and its status is reported as InstalledOther. This is the default - action if no option is specified. BLOCK: Packages in the Rejected patches list, and - packages that include them as dependencies, aren't installed by Patch Manager under any - circumstances. 
If a package was installed before it was added to the Rejected patches list,
- or is installed outside of Patch Manager afterward, it's considered noncompliant with the
- patch baseline and its status is reported as InstalledRejected.
+ the RejectedPackages list. ALLOW_AS_DEPENDENCY Linux and macOS: A package in the
+ rejected patches list is installed only if it is a dependency of another package. It is
+ considered compliant with the patch baseline, and its status is reported as
+ INSTALLED_OTHER. This is the default action if no option is specified. Windows Server:
+ Windows Server doesn't support the concept of package dependencies. If a package in the
+ rejected patches list is already installed on the node, its status is reported as
+ INSTALLED_OTHER. Any package not already installed on the node is skipped. This is the
+ default action if no option is specified. BLOCK All OSs: Packages in the rejected
+ patches list, and packages that include them as dependencies, aren't installed by Patch
+ Manager under any circumstances. If a package was installed before it was added to the
+ rejected patches list, or is installed outside of Patch Manager afterward, it's considered
+ noncompliant with the patch baseline and its status is reported as INSTALLED_REJECTED.
- `"Replace"`: If True, then all fields that are required by the CreatePatchBaseline
operation are also required for this API request. Optional fields that aren't specified are
set to null.
diff --git a/src/services/ssm_quicksetup.jl b/src/services/ssm_quicksetup.jl
new file mode 100644
index 0000000000..d5079c9951
--- /dev/null
+++ b/src/services/ssm_quicksetup.jl
@@ -0,0 +1,423 @@
+# This file is auto-generated by AWSMetadata.jl
+using AWS
+using AWS.AWSServices: ssm_quicksetup
+using AWS.Compat
+using AWS.UUIDs
+
+"""
+    create_configuration_manager(configuration_definitions)
+    create_configuration_manager(configuration_definitions, params::Dict{String,<:Any})
+
+Creates a Quick Setup configuration manager resource. This object is a collection of
+desired state configurations for multiple configuration definitions and summaries
+describing the deployments of those definitions.
+
+# Arguments
+- `configuration_definitions`: The definition of the Quick Setup configuration that the
+  configuration manager deploys.
+
+# Optional Parameters
+Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"Description"`: A description of the configuration manager.
+- `"Name"`: A name for the configuration manager.
+- `"Tags"`: Key-value pairs of metadata to assign to the configuration manager.
+"""
+function create_configuration_manager(
+    ConfigurationDefinitions; aws_config::AbstractAWSConfig=global_aws_config()
+)
+    return ssm_quicksetup(
+        "POST",
+        "/configurationManager",
+        Dict{String,Any}("ConfigurationDefinitions" => ConfigurationDefinitions);
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+function create_configuration_manager(
+    ConfigurationDefinitions,
+    params::AbstractDict{String};
+    aws_config::AbstractAWSConfig=global_aws_config(),
+)
+    return ssm_quicksetup(
+        "POST",
+        "/configurationManager",
+        Dict{String,Any}(
+            mergewith(
+                _merge,
+                Dict{String,Any}("ConfigurationDefinitions" => ConfigurationDefinitions),
+                params,
+            ),
+        );
+        aws_config=aws_config,
+        feature_set=SERVICE_FEATURE_SET,
+    )
+end
+
+"""
+    delete_configuration_manager(manager_arn)
+    delete_configuration_manager(manager_arn, params::Dict{String,<:Any})
+
+Deletes a configuration manager.
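+
+For example, a minimal deletion call might look like this (the ARN is a placeholder, not
+a real resource):
+
+```julia
+delete_configuration_manager(
+    "arn:aws:ssm-quicksetup:us-east-1:111122223333:configuration-manager/abc12345";
+    aws_config=global_aws_config(),
+)
+```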
+ +# Arguments +- `manager_arn`: The ID of the configuration manager. + +""" +function delete_configuration_manager( + ManagerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "DELETE", + "/configurationManager/$(ManagerArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function delete_configuration_manager( + ManagerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "DELETE", + "/configurationManager/$(ManagerArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_configuration_manager(manager_arn) + get_configuration_manager(manager_arn, params::Dict{String,<:Any}) + +Returns a configuration manager. + +# Arguments +- `manager_arn`: The ARN of the configuration manager. + +""" +function get_configuration_manager( + ManagerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "GET", + "/configurationManager/$(ManagerArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function get_configuration_manager( + ManagerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "GET", + "/configurationManager/$(ManagerArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + get_service_settings() + get_service_settings(params::Dict{String,<:Any}) + +Returns settings configured for Quick Setup in the requesting Amazon Web Services account +and Amazon Web Services Region. + +""" +function get_service_settings(; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_quicksetup( + "GET", "/serviceSettings"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function get_service_settings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "GET", + "/serviceSettings", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_configuration_managers() + list_configuration_managers(params::Dict{String,<:Any}) + +Returns Quick Setup configuration managers. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: Filters the results returned by the request. +- `"MaxItems"`: Specifies the maximum number of configuration managers that are returned by + the request. +- `"StartingToken"`: The token to use when requesting a specific set of items from a list. +""" +function list_configuration_managers(; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_quicksetup( + "POST", + "/listConfigurationManagers"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_configuration_managers( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "POST", + "/listConfigurationManagers", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_quick_setup_types() + list_quick_setup_types(params::Dict{String,<:Any}) + +Returns the available Quick Setup types. 
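+
+# Example
+A minimal sketch, assuming the default AWS configuration is already set up:
+
+```julia
+types = list_quick_setup_types()
+```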
+ +""" +function list_quick_setup_types(; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_quicksetup( + "GET", + "/listQuickSetupTypes"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_quick_setup_types( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "GET", + "/listQuickSetupTypes", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + list_tags_for_resource(resource_arn) + list_tags_for_resource(resource_arn, params::Dict{String,<:Any}) + +Returns tags assigned to the resource. + +# Arguments +- `resource_arn`: The ARN of the resource the tag is assigned to. + +""" +function list_tags_for_resource( + ResourceArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "GET", + "/tags/$(ResourceArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function list_tags_for_resource( + ResourceArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "GET", + "/tags/$(ResourceArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + tag_resource(resource_arn, tags) + tag_resource(resource_arn, tags, params::Dict{String,<:Any}) + +Assigns key-value pairs of metadata to Amazon Web Services resources. + +# Arguments +- `resource_arn`: The ARN of the resource to tag. +- `tags`: Key-value pairs of metadata to assign to the resource. + +""" +function tag_resource(ResourceArn, Tags; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_quicksetup( + "PUT", + "/tags/$(ResourceArn)", + Dict{String,Any}("Tags" => Tags); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function tag_resource( + ResourceArn, + Tags, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "PUT", + "/tags/$(ResourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("Tags" => Tags), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + untag_resource(resource_arn, tag_keys) + untag_resource(resource_arn, tag_keys, params::Dict{String,<:Any}) + +Removes tags from the specified resource. + +# Arguments +- `resource_arn`: The ARN of the resource to remove tags from. +- `tag_keys`: The keys of the tags to remove from the resource. + +""" +function untag_resource( + ResourceArn, tagKeys; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "DELETE", + "/tags/$(ResourceArn)", + Dict{String,Any}("tagKeys" => tagKeys); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function untag_resource( + ResourceArn, + tagKeys, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "DELETE", + "/tags/$(ResourceArn)", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("tagKeys" => tagKeys), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_configuration_definition(id, manager_arn) + update_configuration_definition(id, manager_arn, params::Dict{String,<:Any}) + +Updates a Quick Setup configuration definition. + +# Arguments +- `id`: The ID of the configuration definition you want to update. +- `manager_arn`: The ARN of the configuration manager associated with the definition to + update. 
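+
+# Example
+A sketch of moving a definition to a newer type version (the ID, ARN, and version value
+are placeholders):
+
+```julia
+update_configuration_definition(
+    "abc12345",
+    "arn:aws:ssm-quicksetup:us-east-1:111122223333:configuration-manager/example",
+    Dict("TypeVersion" => "2.0"),
+)
+```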
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"LocalDeploymentAdministrationRoleArn"`: The ARN of the IAM role used to administrate + local configuration deployments. +- `"LocalDeploymentExecutionRoleName"`: The name of the IAM role used to deploy local + configurations. +- `"Parameters"`: The parameters for the configuration definition type. +- `"TypeVersion"`: The version of the Quick Setup type to use. +""" +function update_configuration_definition( + Id, ManagerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "PUT", + "/configurationDefinition/$(ManagerArn)/$(Id)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_configuration_definition( + Id, + ManagerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "PUT", + "/configurationDefinition/$(ManagerArn)/$(Id)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_configuration_manager(manager_arn) + update_configuration_manager(manager_arn, params::Dict{String,<:Any}) + +Updates a Quick Setup configuration manager. + +# Arguments +- `manager_arn`: The ARN of the configuration manager. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Description"`: A description of the configuration manager. +- `"Name"`: A name for the configuration manager. +""" +function update_configuration_manager( + ManagerArn; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "PUT", + "/configurationManager/$(ManagerArn)"; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_configuration_manager( + ManagerArn, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return ssm_quicksetup( + "PUT", + "/configurationManager/$(ManagerArn)", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + update_service_settings() + update_service_settings(params::Dict{String,<:Any}) + +Updates settings configured for Quick Setup. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ExplorerEnablingRoleArn"`: The IAM role used to enable Explorer. +""" +function update_service_settings(; aws_config::AbstractAWSConfig=global_aws_config()) + return ssm_quicksetup( + "PUT", "/serviceSettings"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function update_service_settings( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return ssm_quicksetup( + "PUT", + "/serviceSettings", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end diff --git a/src/services/supplychain.jl b/src/services/supplychain.jl index 10565faf15..00c0018833 100644 --- a/src/services/supplychain.jl +++ b/src/services/supplychain.jl @@ -95,10 +95,14 @@ end send_data_integration_event(data, event_group_id, event_type, instance_id) send_data_integration_event(data, event_group_id, event_type, instance_id, params::Dict{String,<:Any}) -Send transactional data events with real-time data for analysis or monitoring. +Send the transactional data payload for the event with real-time data for analysis or +monitoring. The real-time data events are stored in an Amazon Web Services service before +being processed and stored in data lake. 
New data events are synced with data lake at 5 PM
+GMT every day. The updated transactional data is available in data lake after ingestion.

# Arguments
-- `data`: The data payload of the event.
+- `data`: The data payload of the event. For more information on the data schema to use,
+  see Data entities supported in AWS Supply Chain.
- `event_group_id`: Event identifier (for example, orderId for InboundOrder) used for data
sharing or partitioning.
- `event_type`: The data event type.
diff --git a/src/services/timestream_influxdb.jl b/src/services/timestream_influxdb.jl
index 9766e407c9..5617bcf2be 100644
--- a/src/services/timestream_influxdb.jl
+++ b/src/services/timestream_influxdb.jl
@@ -429,9 +429,12 @@ Updates a Timestream for InfluxDB DB instance.

# Optional Parameters
Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are:
+- `"dbInstanceType"`: The Timestream for InfluxDB DB instance type to run InfluxDB on.
- `"dbParameterGroupIdentifier"`: The id of the DB parameter group to assign to your DB
instance. DB parameter groups specify how the database is configured. For example, DB
parameter groups can specify the limit for query concurrency.
+- `"deploymentType"`: Specifies whether the DB instance will be deployed as a standalone
+  instance or with a Multi-AZ standby for high availability.
- `"logDeliveryConfiguration"`: Configuration for sending InfluxDB engine logs to send to
specified S3 bucket.
"""
diff --git a/src/services/timestream_query.jl b/src/services/timestream_query.jl
index 1ed4086d20..ab7191e474 100644
--- a/src/services/timestream_query.jl
+++ b/src/services/timestream_query.jl
@@ -620,7 +620,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
The maximum value supported for MaxQueryTCU is 1000. To request an increase to this soft
limit, contact Amazon Web Services Support. For information about the default quota for
maxQueryTCU, see Default quotas.
-- `"QueryPricingModel"`: The pricing model for queries in an account.
+- `"QueryPricingModel"`: The pricing model for queries in an account. The
+  QueryPricingModel parameter is used by several Timestream operations; however, the
+  UpdateAccountSettings API operation doesn't recognize any values other than COMPUTE_UNITS.
"""
function update_account_settings(; aws_config::AbstractAWSConfig=global_aws_config())
    return timestream_query(
diff --git a/src/services/tnb.jl b/src/services/tnb.jl
index 05df993e3a..f436b0210a 100644
--- a/src/services/tnb.jl
+++ b/src/services/tnb.jl
@@ -291,7 +291,7 @@ end
    get_sol_function_instance(vnf_instance_id)
    get_sol_function_instance(vnf_instance_id, params::Dict{String,<:Any})

-Gets the details of a network function instance, including the instantation state and
+Gets the details of a network function instance, including the instantiation state and
metadata from the function package descriptor in the network function package. A network
function instance is a function in a function package .
@@ -665,9 +665,10 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys
actually making the request and provides an error response. If you have the required
permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
- `"tags"`: A tag is a label that you assign to an Amazon Web Services resource. Each tag
- consists of a key and an optional value. When you use this API, the tags are transferred to
- the network operation that is created.
Use tags to search and filter your resources or - track your Amazon Web Services costs. + consists of a key and an optional value. When you use this API, the tags are only applied + to the network operation that is created. These tags are not applied to the network + instance. Use tags to search and filter your resources or track your Amazon Web Services + costs. """ function instantiate_sol_network_instance( nsInstanceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -804,6 +805,8 @@ as network instance instantiation or termination. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"max_results"`: The maximum number of results to include in the response. - `"nextpage_opaque_marker"`: The token for the next page of results. +- `"nsInstanceId"`: Network instance id filter, to retrieve network operations associated + to a network instance. """ function list_sol_network_operations(; aws_config::AbstractAWSConfig=global_aws_config()) return tnb( @@ -1031,9 +1034,10 @@ delete it. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"tags"`: A tag is a label that you assign to an Amazon Web Services resource. Each tag - consists of a key and an optional value. When you use this API, the tags are transferred to - the network operation that is created. Use tags to search and filter your resources or - track your Amazon Web Services costs. + consists of a key and an optional value. When you use this API, the tags are only applied + to the network operation that is created. These tags are not applied to the network + instance. Use tags to search and filter your resources or track your Amazon Web Services + costs. """ function terminate_sol_network_instance( nsInstanceId; aws_config::AbstractAWSConfig=global_aws_config() @@ -1148,20 +1152,28 @@ end Update a network instance. A network instance is a single network created in Amazon Web Services TNB that can be deployed and on which life-cycle operations (like terminate, -update, and delete) can be performed. +update, and delete) can be performed. Choose the updateType parameter to target the +necessary update of the network instance. # Arguments - `ns_instance_id`: ID of the network instance. -- `update_type`: The type of update. +- `update_type`: The type of update. Use the MODIFY_VNF_INFORMATION update type, to + update a specific network function configuration, in the network instance. Use the + UPDATE_NS update type, to update the network instance to a new network service descriptor. # Optional Parameters Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"modifyVnfInfoData"`: Identifies the network function information parameters and/or the - configurable properties of the network function to be modified. + configurable properties of the network function to be modified. Include this property only + if the update type is MODIFY_VNF_INFORMATION. - `"tags"`: A tag is a label that you assign to an Amazon Web Services resource. Each tag - consists of a key and an optional value. When you use this API, the tags are transferred to - the network operation that is created. Use tags to search and filter your resources or - track your Amazon Web Services costs. + consists of a key and an optional value. When you use this API, the tags are only applied + to the network operation that is created. These tags are not applied to the network + instance. 
Use tags to search and filter your resources or track your Amazon Web Services + costs. +- `"updateNs"`: Identifies the network service descriptor and the configurable properties + of the descriptor, to be used for the update. Include this property only if the update type + is UPDATE_NS. """ function update_sol_network_instance( nsInstanceId, updateType; aws_config::AbstractAWSConfig=global_aws_config() diff --git a/src/services/wafv2.jl b/src/services/wafv2.jl index 22acbfee95..73e5b43ad0 100644 --- a/src/services/wafv2.jl +++ b/src/services/wafv2.jl @@ -2583,12 +2583,15 @@ end put_permission_policy(policy, resource_arn) put_permission_policy(policy, resource_arn, params::Dict{String,<:Any}) -Attaches an IAM policy to the specified resource. Use this to share a rule group across -accounts. You must be the owner of the rule group to perform this operation. This action is -subject to the following restrictions: You can attach only one policy with each -PutPermissionPolicy request. The ARN in the request must be a valid WAF RuleGroup ARN and -the rule group must exist in the same Region. The user making the request must be the -owner of the rule group. +Use this to share a rule group with other accounts. This action attaches an IAM policy to +the specified resource. You must be the owner of the rule group to perform this operation. +This action is subject to the following restrictions: You can attach only one policy with +each PutPermissionPolicy request. The ARN in the request must be a valid WAF RuleGroup +ARN and the rule group must exist in the same Region. The user making the request must be +the owner of the rule group. If a rule group has been shared with your account, you can +access it through the call GetRuleGroup, and you can reference it in CreateWebACL and +UpdateWebACL. Rule groups that are shared with you don't appear in your WAF console rule +groups listing. # Arguments - `policy`: The policy to attach to the specified rule group. The policy specifications diff --git a/src/services/workspaces.jl b/src/services/workspaces.jl index 2d5b341c5f..f4017d2f9f 100644 --- a/src/services/workspaces.jl +++ b/src/services/workspaces.jl @@ -727,7 +727,9 @@ WorkSpaces are created. The MANUAL running mode value is only supported by Am WorkSpaces Core. Contact your account team to be allow-listed to use this value. For more information, see Amazon WorkSpaces Core. You don't need to specify the PCOIP protocol for Linux bundles because WSP is the default protocol for those bundles. User-decoupled -WorkSpaces are only supported by Amazon WorkSpaces Core. +WorkSpaces are only supported by Amazon WorkSpaces Core. Review your running mode to +ensure you are using one that is optimal for your needs and budget. For more information on +switching running modes, see Can I switch between hourly and monthly billing? # Arguments - `workspaces`: The WorkSpaces to create. You can specify up to 25 WorkSpaces. @@ -756,6 +758,75 @@ function create_workspaces( ) end +""" + create_workspaces_pool(bundle_id, capacity, description, directory_id, pool_name) + create_workspaces_pool(bundle_id, capacity, description, directory_id, pool_name, params::Dict{String,<:Any}) + +Creates a pool of WorkSpaces. + +# Arguments +- `bundle_id`: The identifier of the bundle for the pool. +- `capacity`: The user capacity of the pool. +- `description`: The pool description. +- `directory_id`: The identifier of the directory for the pool. +- `pool_name`: The name of the pool. 
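+
+# Example
+A sketch of creating a pool (all identifiers are placeholders, and the `Capacity` value
+is illustrative; see the WorkSpaces API reference for the exact `Capacity` structure):
+
+```julia
+create_workspaces_pool(
+    "wsb-abcd1234",
+    Dict("DesiredUserSessions" => 5),
+    "Example pool for shared desktop sessions",
+    "wsd-abcdefgh",
+    "example-pool",
+)
+```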
+ +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ApplicationSettings"`: Indicates the application settings of the pool. +- `"Tags"`: The tags for the pool. +- `"TimeoutSettings"`: Indicates the timeout settings of the pool. +""" +function create_workspaces_pool( + BundleId, + Capacity, + Description, + DirectoryId, + PoolName; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "CreateWorkspacesPool", + Dict{String,Any}( + "BundleId" => BundleId, + "Capacity" => Capacity, + "Description" => Description, + "DirectoryId" => DirectoryId, + "PoolName" => PoolName, + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function create_workspaces_pool( + BundleId, + Capacity, + Description, + DirectoryId, + PoolName, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "CreateWorkspacesPool", + Dict{String,Any}( + mergewith( + _merge, + Dict{String,Any}( + "BundleId" => BundleId, + "Capacity" => Capacity, + "Description" => Description, + "DirectoryId" => DirectoryId, + "PoolName" => PoolName, + ), + params, + ), + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ delete_account_link_invitation(link_id) delete_account_link_invitation(link_id, params::Dict{String,<:Any}) @@ -1695,9 +1766,11 @@ Describes the available directories that are registered with Amazon WorkSpaces. Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"DirectoryIds"`: The identifiers of the directories. If the value is null, all directories are retrieved. +- `"Filters"`: The filter condition for the WorkSpaces. - `"Limit"`: The maximum number of directories to return. - `"NextToken"`: If you received a NextToken from a previous call that was paginated, provide this token to receive the next set of results. +- `"WorkspaceDirectoryNames"`: The names of the WorkSpace directories. """ function describe_workspace_directories(; aws_config::AbstractAWSConfig=global_aws_config()) return workspaces( @@ -1890,6 +1963,73 @@ function describe_workspaces_connection_status( ) end +""" + describe_workspaces_pool_sessions(pool_id) + describe_workspaces_pool_sessions(pool_id, params::Dict{String,<:Any}) + +Retrieves a list that describes the streaming sessions for a specified pool. + +# Arguments +- `pool_id`: The identifier of the pool. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Limit"`: The maximum number of items to return. +- `"NextToken"`: If you received a NextToken from a previous call that was paginated, + provide this token to receive the next set of results. +- `"UserId"`: The identifier of the user. 
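+
+# Example
+A minimal sketch (the pool ID is a placeholder):
+
+```julia
+resp = describe_workspaces_pool_sessions("wspool-abcd1234")
+# Optionally cap the page size with the documented "Limit" key:
+resp = describe_workspaces_pool_sessions("wspool-abcd1234", Dict("Limit" => 10))
+```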
+""" +function describe_workspaces_pool_sessions( + PoolId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPoolSessions", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function describe_workspaces_pool_sessions( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPoolSessions", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + describe_workspaces_pools() + describe_workspaces_pools(params::Dict{String,<:Any}) + +Describes the specified WorkSpaces Pools. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"Filters"`: The filter conditions for the WorkSpaces Pool to return. +- `"Limit"`: The maximum number of items to return. +- `"NextToken"`: If you received a NextToken from a previous call that was paginated, + provide this token to receive the next set of results. +- `"PoolIds"`: The identifier of the WorkSpaces Pools. +""" +function describe_workspaces_pools(; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "DescribeWorkspacesPools"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET + ) +end +function describe_workspaces_pools( + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "DescribeWorkspacesPools", + params; + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ disassociate_connection_alias(alias_id) disassociate_connection_alias(alias_id, params::Dict{String,<:Any}) @@ -2115,8 +2255,9 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys - `"Applications"`: If specified, the version of Microsoft Office to subscribe to. Valid only for Windows 10 and 11 BYOL images. For more information about subscribing to Office for BYOL images, see Bring Your Own Windows Desktop Licenses. Although this parameter - is an array, only one item is allowed at this time. Windows 11 only supports - Microsoft_Office_2019. + is an array, only one item is allowed at this time. During the image import process, + non-GPU WSP WorkSpaces with Windows 11 support only Microsoft_Office_2019. GPU WSP + WorkSpaces with Windows 11 do not support Office installation. - `"Tags"`: The tags. Each WorkSpaces resource can have a maximum of 50 tags. """ function import_workspace_image( @@ -2501,6 +2642,44 @@ function modify_selfservice_permissions( ) end +""" + modify_streaming_properties(resource_id) + modify_streaming_properties(resource_id, params::Dict{String,<:Any}) + +Modifies the specified streaming properties. + +# Arguments +- `resource_id`: The identifier of the resource. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"StreamingProperties"`: The streaming properties to configure. 
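+
+# Example
+A sketch (the resource ID is a placeholder and the property shown is illustrative; see
+the WorkSpaces API reference for the full StreamingProperties structure):
+
+```julia
+modify_streaming_properties(
+    "wsd-abcdefgh",
+    Dict(
+        "StreamingProperties" =>
+            Dict("StreamingExperiencePreferredProtocol" => "TCP"),
+    ),
+)
+```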
+""" +function modify_streaming_properties( + ResourceId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "ModifyStreamingProperties", + Dict{String,Any}("ResourceId" => ResourceId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function modify_streaming_properties( + ResourceId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "ModifyStreamingProperties", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("ResourceId" => ResourceId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ modify_workspace_access_properties(resource_id, workspace_access_properties) modify_workspace_access_properties(resource_id, workspace_access_properties, params::Dict{String,<:Any}) @@ -2775,29 +2954,30 @@ function rebuild_workspaces( end """ - register_workspace_directory(directory_id, enable_work_docs) - register_workspace_directory(directory_id, enable_work_docs, params::Dict{String,<:Any}) + register_workspace_directory() + register_workspace_directory(params::Dict{String,<:Any}) Registers the specified directory. This operation is asynchronous and returns before the WorkSpace directory is registered. If this is the first time you are registering a directory, you will need to create the workspaces_DefaultRole role before you can register a directory. For more information, see Creating the workspaces_DefaultRole Role. -# Arguments -- `directory_id`: The identifier of the directory. You cannot register a directory if it +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ActiveDirectoryConfig"`: The active directory config of the directory. +- `"DirectoryId"`: The identifier of the directory. You cannot register a directory if it does not have a status of Active. If the directory does not have a status of Active, you will receive an InvalidResourceStateException error. If you have already registered the maximum number of directories that you can register with Amazon WorkSpaces, you will receive a ResourceLimitExceededException error. Deregister directories that you are not using for WorkSpaces, and try again. -- `enable_work_docs`: Indicates whether Amazon WorkDocs is enabled or disabled. If you have - enabled this parameter and WorkDocs is not available in the Region, you will receive an - OperationNotSupportedException error. Set EnableWorkDocs to disabled, and try again. - -# Optional Parameters -Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: - `"EnableSelfService"`: Indicates whether self-service capabilities are enabled or disabled. +- `"EnableWorkDocs"`: Indicates whether Amazon WorkDocs is enabled or disabled. If you have + enabled this parameter and WorkDocs is not available in the Region, you will receive an + OperationNotSupportedException error. Set EnableWorkDocs to disabled, and try again. +- `"IdcInstanceArn"`: The Amazon Resource Name (ARN) of the identity center instance. +- `"MicrosoftEntraConfig"`: The details about Microsoft Entra config. - `"SubnetIds"`: The identifiers of the subnets for your virtual private cloud (VPC). Make sure that the subnets are in supported Availability Zones. The subnets must also be in separate Availability Zones. If these conditions are not met, you will receive an @@ -2808,34 +2988,22 @@ Optional parameters can be passed as a `params::Dict{String,<:Any}`. 
Valid keys Web Services account must be enabled for BYOL. If your account has not been enabled for BYOL, you will receive an InvalidParameterValuesException error. For more information about BYOL images, see Bring Your Own Windows Desktop Images. +- `"UserIdentityType"`: The type of identity management the user is using. +- `"WorkspaceDirectoryDescription"`: Description of the directory to register. +- `"WorkspaceDirectoryName"`: The name of the directory to register. +- `"WorkspaceType"`: Indicates whether the directory's WorkSpace type is personal or pools. """ -function register_workspace_directory( - DirectoryId, EnableWorkDocs; aws_config::AbstractAWSConfig=global_aws_config() -) +function register_workspace_directory(; aws_config::AbstractAWSConfig=global_aws_config()) return workspaces( - "RegisterWorkspaceDirectory", - Dict{String,Any}("DirectoryId" => DirectoryId, "EnableWorkDocs" => EnableWorkDocs); - aws_config=aws_config, - feature_set=SERVICE_FEATURE_SET, + "RegisterWorkspaceDirectory"; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET ) end function register_workspace_directory( - DirectoryId, - EnableWorkDocs, - params::AbstractDict{String}; - aws_config::AbstractAWSConfig=global_aws_config(), + params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() ) return workspaces( "RegisterWorkspaceDirectory", - Dict{String,Any}( - mergewith( - _merge, - Dict{String,Any}( - "DirectoryId" => DirectoryId, "EnableWorkDocs" => EnableWorkDocs - ), - params, - ), - ); + params; aws_config=aws_config, feature_set=SERVICE_FEATURE_SET, ) @@ -2958,7 +3126,7 @@ end start_workspaces(start_workspace_requests, params::Dict{String,<:Any}) Starts the specified WorkSpaces. You cannot start a WorkSpace unless it has a running mode -of AutoStop and a state of STOPPED. +of AutoStop or Manual and a state of STOPPED. # Arguments - `start_workspace_requests`: The WorkSpaces to start. You can specify up to 25 WorkSpaces. @@ -2993,12 +3161,42 @@ function start_workspaces( ) end +""" + start_workspaces_pool(pool_id) + start_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Starts the specified pool. You cannot start a pool unless it has a running mode of AutoStop +and a state of STOPPED. + +# Arguments +- `pool_id`: The identifier of the pool. + +""" +function start_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "StartWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function start_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "StartWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ stop_workspaces(stop_workspace_requests) stop_workspaces(stop_workspace_requests, params::Dict{String,<:Any}) - Stops the specified WorkSpaces. You cannot stop a WorkSpace unless it has a running mode -of AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR. +Stops the specified WorkSpaces. You cannot stop a WorkSpace unless it has a running mode of +AutoStop or Manual and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR. # Arguments - `stop_workspace_requests`: The WorkSpaces to stop. You can specify up to 25 WorkSpaces. 
@@ -3033,6 +3231,36 @@ function stop_workspaces( ) end +""" + stop_workspaces_pool(pool_id) + stop_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Stops the specified pool. You cannot stop a WorkSpace pool unless it has a running mode of +AutoStop and a state of AVAILABLE, IMPAIRED, UNHEALTHY, or ERROR. + +# Arguments +- `pool_id`: The identifier of the pool. + +""" +function stop_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "StopWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function stop_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "StopWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ terminate_workspaces(terminate_workspace_requests) terminate_workspaces(terminate_workspace_requests, params::Dict{String,<:Any}) @@ -3090,6 +3318,72 @@ function terminate_workspaces( ) end +""" + terminate_workspaces_pool(pool_id) + terminate_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Terminates the specified pool. + +# Arguments +- `pool_id`: The identifier of the pool. + +""" +function terminate_workspaces_pool( + PoolId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function terminate_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + +""" + terminate_workspaces_pool_session(session_id) + terminate_workspaces_pool_session(session_id, params::Dict{String,<:Any}) + +Terminates the pool session. + +# Arguments +- `session_id`: The identifier of the pool session. + +""" +function terminate_workspaces_pool_session( + SessionId; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "TerminateWorkspacesPoolSession", + Dict{String,Any}("SessionId" => SessionId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function terminate_workspaces_pool_session( + SessionId, + params::AbstractDict{String}; + aws_config::AbstractAWSConfig=global_aws_config(), +) + return workspaces( + "TerminateWorkspacesPoolSession", + Dict{String,Any}( + mergewith(_merge, Dict{String,Any}("SessionId" => SessionId), params) + ); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end + """ update_connect_client_add_in(add_in_id, resource_id) update_connect_client_add_in(add_in_id, resource_id, params::Dict{String,<:Any}) @@ -3334,3 +3628,40 @@ function update_workspace_image_permission( feature_set=SERVICE_FEATURE_SET, ) end + +""" + update_workspaces_pool(pool_id) + update_workspaces_pool(pool_id, params::Dict{String,<:Any}) + +Updates the specified pool. + +# Arguments +- `pool_id`: The identifier of the specified pool to update. + +# Optional Parameters +Optional parameters can be passed as a `params::Dict{String,<:Any}`. Valid keys are: +- `"ApplicationSettings"`: The persistent application settings for users in the pool. +- `"BundleId"`: The identifier of the bundle. 
+- `"Capacity"`: The desired capacity for the pool. +- `"Description"`: Describes the specified pool to update. +- `"DirectoryId"`: The identifier of the directory. +- `"TimeoutSettings"`: Indicates the timeout settings of the specified pool. +""" +function update_workspaces_pool(PoolId; aws_config::AbstractAWSConfig=global_aws_config()) + return workspaces( + "UpdateWorkspacesPool", + Dict{String,Any}("PoolId" => PoolId); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end +function update_workspaces_pool( + PoolId, params::AbstractDict{String}; aws_config::AbstractAWSConfig=global_aws_config() +) + return workspaces( + "UpdateWorkspacesPool", + Dict{String,Any}(mergewith(_merge, Dict{String,Any}("PoolId" => PoolId), params)); + aws_config=aws_config, + feature_set=SERVICE_FEATURE_SET, + ) +end
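+
+# Usage sketch (the pool ID and capacity value below are placeholders):
+#
+#     update_workspaces_pool(
+#         "wspool-abcd1234",
+#         Dict("Capacity" => Dict("DesiredUserSessions" => 10)),
+#     )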